diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index b291cf2bcd..7f7cec9cda 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,8 +1,10 @@
 /web/ui @juliusv
 /web/ui/module @juliusv @nexucis
-/storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
-/storage/remote/otlptranslator @gouthamve @jesusvazquez
+/storage/remote @cstyan @bwplotka @tomwilkie
+/storage/remote/otlptranslator @aknuds1 @jesusvazquez
 /discovery/kubernetes @brancz
 /tsdb @jesusvazquez
 /promql @roidelapluie
 /cmd/promtool @dgl
+/documentation/prometheus-mixin @metalmatze
+
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index f4d17b3596..bb4e2d24c9 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,4 +1,4 @@
-blank_issues_enabled: false
+blank_issues_enabled: true
 contact_links:
   - name: Prometheus Community Support
     url: https://prometheus.io/community/
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 95df72dd0e..cf90177b1d 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,4 +1,8 @@
diff --git a/console_libraries/menu.lib b/console_libraries/menu.lib
deleted file mode 100644
--- a/console_libraries/menu.lib
+++ /dev/null
diff --git a/console_libraries/prom.lib b/console_libraries/prom.lib
deleted file mode 100644
index d7d436f947..0000000000
--- a/console_libraries/prom.lib
+++ /dev/null
@@ -1,138 +0,0 @@
diff --git a/consoles/index.html.example b/consoles/index.html.example
deleted file mode 100644
index c725d30dea..0000000000
--- a/consoles/index.html.example
+++ /dev/null
@@ -1,28 +0,0 @@
diff --git a/consoles/node-cpu.html b/consoles/node-cpu.html
deleted file mode 100644
index d6c515d2dd..0000000000
--- a/consoles/node-cpu.html
+++ /dev/null
@@ -1,60 +0,0 @@
diff --git a/consoles/node-disk.html b/consoles/node-disk.html
deleted file mode 100644
index ffff41b797..0000000000
--- a/consoles/node-disk.html
+++ /dev/null
@@ -1,78 +0,0 @@
diff --git a/consoles/node-overview.html b/consoles/node-overview.html
deleted file mode 100644
index 92f53ba871..0000000000
--- a/consoles/node-overview.html
+++ /dev/null
@@ -1,121 +0,0 @@
diff --git a/consoles/node.html b/consoles/node.html
deleted file mode 100644
index 9a37544eee..0000000000
--- a/consoles/node.html
+++ /dev/null
@@ -1,35 +0,0 @@
diff --git a/consoles/prometheus-overview.html b/consoles/prometheus-overview.html
deleted file mode 100644
index 08e027de06..0000000000
--- a/consoles/prometheus-overview.html
+++ /dev/null
@@ -1,96 +0,0 @@
diff --git a/consoles/prometheus.html b/consoles/prometheus.html
deleted file mode 100644
index e0d026376d..0000000000
--- a/consoles/prometheus.html
+++ /dev/null
@@ -1,34 +0,0 @@
diff --git a/discovery/README.md b/discovery/README.md
index 19b579b399..4c06608625 100644
--- a/discovery/README.md
+++ b/discovery/README.md
@@ -234,6 +234,11 @@ type Config interface {
 type DiscovererOptions struct {
 	Logger log.Logger
+
+	// A registerer for the Discoverer's metrics.
+	Registerer prometheus.Registerer
+
+	HTTPClientOptions []config.HTTPClientOption
 }
 ```
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
index 86d76627e1..a44912481a 100644
--- a/discovery/aws/ec2.go
+++ b/discovery/aws/ec2.go
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"time"
 
@@ -30,6 +31,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -40,28 +42,29 @@ import (
 const (
-	ec2Label                  = model.MetaLabelPrefix + "ec2_"
-	ec2LabelAMI               = ec2Label + "ami"
-	ec2LabelAZ                = ec2Label + "availability_zone"
-	ec2LabelAZID              = ec2Label + "availability_zone_id"
-	ec2LabelArch              = ec2Label + "architecture"
-	ec2LabelIPv6Addresses     = ec2Label + "ipv6_addresses"
-	ec2LabelInstanceID        = ec2Label + "instance_id"
-	ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle"
-	ec2LabelInstanceState     = ec2Label + "instance_state"
-	ec2LabelInstanceType      = ec2Label + "instance_type"
-	ec2LabelOwnerID           = ec2Label + "owner_id"
-	ec2LabelPlatform          = ec2Label + "platform"
-	ec2LabelPrimarySubnetID   = ec2Label + "primary_subnet_id"
-	ec2LabelPrivateDNS        = ec2Label + "private_dns_name"
-	ec2LabelPrivateIP         = ec2Label + "private_ip"
-	ec2LabelPublicDNS         = ec2Label + "public_dns_name"
-	ec2LabelPublicIP          = ec2Label + "public_ip"
-	ec2LabelRegion            = ec2Label + "region"
-	ec2LabelSubnetID          = ec2Label + "subnet_id"
-	ec2LabelTag               = ec2Label + "tag_"
-	ec2LabelVPCID             = ec2Label + "vpc_id"
-	ec2LabelSeparator         = ","
+	ec2Label                     = model.MetaLabelPrefix + "ec2_"
+	ec2LabelAMI                  = ec2Label + "ami"
+	ec2LabelAZ                   = ec2Label + "availability_zone"
+	ec2LabelAZID                 = ec2Label + "availability_zone_id"
+	ec2LabelArch                 = ec2Label + "architecture"
+	ec2LabelIPv6Addresses        = ec2Label + "ipv6_addresses"
+	ec2LabelInstanceID           = ec2Label + "instance_id"
+	ec2LabelInstanceLifecycle    = ec2Label + "instance_lifecycle"
+	ec2LabelInstanceState        = ec2Label + "instance_state"
+	ec2LabelInstanceType         = ec2Label + "instance_type"
+	ec2LabelOwnerID              = ec2Label + "owner_id"
+	ec2LabelPlatform             = ec2Label + "platform"
+	ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses"
+	ec2LabelPrimarySubnetID      = ec2Label + "primary_subnet_id"
+	ec2LabelPrivateDNS           = ec2Label + "private_dns_name"
+	ec2LabelPrivateIP            = ec2Label + "private_ip"
+	ec2LabelPublicDNS            = ec2Label + "public_dns_name"
+	ec2LabelPublicIP             = ec2Label + "public_ip"
+	ec2LabelRegion               = ec2Label + "region"
+	ec2LabelSubnetID             = ec2Label + "subnet_id"
+	ec2LabelTag                  = ec2Label + "tag_"
+	ec2LabelVPCID                = ec2Label + "vpc_id"
+	ec2LabelSeparator            = ","
 )
 
 // DefaultEC2SDConfig is the default EC2 SD configuration.
@@ -96,12 +99,19 @@ type EC2SDConfig struct {
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*EC2SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &ec2Metrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the EC2 Config.
func (*EC2SDConfig) Name() string { return "ec2" } // NewDiscoverer returns a Discoverer for the EC2 Config. func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewEC2Discovery(c, opts.Logger), nil + return NewEC2Discovery(c, opts.Logger, opts.Metrics) } // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config. @@ -129,7 +139,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("EC2 SD configuration filter values cannot be empty") } } - return nil + return c.HTTPClientConfig.Validate() } // EC2Discovery periodically performs EC2-SD requests. It implements @@ -147,7 +157,12 @@ type EC2Discovery struct { } // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. -func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { +func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { + m, ok := metrics.(*ec2Metrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } @@ -156,12 +171,15 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { cfg: conf, } d.Discovery = refresh.NewDiscovery( - logger, - "ec2", - time.Duration(d.cfg.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "ec2", + Interval: time.Duration(d.cfg.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) - return d + return d, nil } func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { @@ -263,7 +281,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error if inst.PrivateDnsName != nil { labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName) } - addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port)) + addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port)) labels[model.AddressLabel] = model.LabelValue(addr) if inst.Platform != nil { @@ -300,6 +318,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error var subnets []string var ipv6addrs []string + var primaryipv6addrs []string subnetsMap := make(map[string]struct{}) for _, eni := range inst.NetworkInterfaces { if eni.SubnetId == nil { @@ -313,6 +332,15 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error for _, ipv6addr := range eni.Ipv6Addresses { ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address) + if *ipv6addr.IsPrimaryIpv6 { + // we might have to extend the slice with more than one element + // that could leave empty strings in the list which is intentional + // to keep the position/device index information + for int64(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex { + primaryipv6addrs = append(primaryipv6addrs, "") + } + primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address + } } } labels[ec2LabelSubnetID] = model.LabelValue( @@ -325,6 +353,12 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error strings.Join(ipv6addrs, ec2LabelSeparator) + ec2LabelSeparator) } + if len(primaryipv6addrs) > 0 { + labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue( + ec2LabelSeparator + + strings.Join(primaryipv6addrs, ec2LabelSeparator) + + ec2LabelSeparator) + } } for _, t := range inst.Tags { diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 
e671769ca3..0ad7f2d541 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net" + "strconv" "strings" "time" @@ -29,6 +30,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/lightsail" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -79,12 +81,19 @@ type LightsailSDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } +// NewDiscovererMetrics implements discovery.Config. +func (*LightsailSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &lightsailMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Lightsail Config. func (*LightsailSDConfig) Name() string { return "lightsail" } // NewDiscoverer returns a Discoverer for the Lightsail Config. func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewLightsailDiscovery(c, opts.Logger), nil + return NewLightsailDiscovery(c, opts.Logger, opts.Metrics) } // UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config. @@ -109,7 +118,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err } c.Region = region } - return nil + return c.HTTPClientConfig.Validate() } // LightsailDiscovery periodically performs Lightsail-SD requests. It implements @@ -121,20 +130,29 @@ type LightsailDiscovery struct { } // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. -func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *LightsailDiscovery { +func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { + m, ok := metrics.(*lightsailMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } + d := &LightsailDiscovery{ cfg: conf, } d.Discovery = refresh.NewDiscovery( - logger, - "lightsail", - time.Duration(d.cfg.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "lightsail", + Interval: time.Duration(d.cfg.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) - return d + return d, nil } func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) { @@ -212,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, lightsailLabelRegion: model.LabelValue(d.cfg.Region), } - addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port)) + addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port)) labels[model.AddressLabel] = model.LabelValue(addr) if inst.PublicIpAddress != nil { diff --git a/discovery/aws/metrics_ec2.go b/discovery/aws/metrics_ec2.go new file mode 100644 index 0000000000..73c061c3d8 --- /dev/null +++ b/discovery/aws/metrics_ec2.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "github.com/prometheus/prometheus/discovery" +) + +type ec2Metrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +var _ discovery.DiscovererMetrics = (*ec2Metrics)(nil) + +// Register implements discovery.DiscovererMetrics. +func (m *ec2Metrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *ec2Metrics) Unregister() {} diff --git a/discovery/aws/metrics_lightsail.go b/discovery/aws/metrics_lightsail.go new file mode 100644 index 0000000000..69593e701d --- /dev/null +++ b/discovery/aws/metrics_lightsail.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "github.com/prometheus/prometheus/discovery" +) + +type lightsailMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +var _ discovery.DiscovererMetrics = (*lightsailMetrics)(nil) + +// Register implements discovery.DiscovererMetrics. +func (m *lightsailMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. 
+func (m *lightsailMetrics) Unregister() {} diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 098fbb4c5f..70d95b9f3a 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -17,21 +17,29 @@ import ( "context" "errors" "fmt" + "math/rand" "net" "net/http" + "strconv" "strings" "sync" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + cache "github.com/Code-Hex/go-generics-cache" + "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -58,6 +66,7 @@ const ( azureLabelMachineSize = azureLabel + "machine_size" authMethodOAuth = "OAuth" + authMethodSDK = "SDK" authMethodManagedIdentity = "ManagedIdentity" ) @@ -68,21 +77,34 @@ var ( DefaultSDConfig = SDConfig{ Port: 80, RefreshInterval: model.Duration(5 * time.Minute), - Environment: azure.PublicCloud.Name, + Environment: "AzurePublicCloud", AuthenticationMethod: authMethodOAuth, HTTPClientConfig: config_util.DefaultHTTPClientConfig, } - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_azure_failures_total", - Help: "Number of Azure service discovery refresh failures.", - }) ) +var environments = map[string]cloud.Configuration{ + "AZURECHINACLOUD": cloud.AzureChina, + "AZURECLOUD": cloud.AzurePublic, + "AZUREGERMANCLOUD": cloud.AzurePublic, + "AZUREPUBLICCLOUD": cloud.AzurePublic, + "AZUREUSGOVERNMENT": cloud.AzureGovernment, + "AZUREUSGOVERNMENTCLOUD": cloud.AzureGovernment, +} + +// CloudConfigurationFromName returns cloud configuration based on the common name specified. +func CloudConfigurationFromName(name string) (cloud.Configuration, error) { + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("there is no cloud configuration matching the name %q", name) + } + + return env, nil +} + func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) } // SDConfig is the configuration for Azure based service discovery. @@ -100,12 +122,17 @@ type SDConfig struct { HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "azure" } // NewDiscoverer returns a Discoverer for the Config. 
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger), nil + return NewDiscovery(c, opts.Logger, opts.Metrics) } func validateAuthParam(param, name string) error { @@ -123,7 +150,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if err != nil { return err } - if err = validateAuthParam(c.SubscriptionID, "subscription_id"); err != nil { return err } @@ -140,118 +166,162 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } } - if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity { - return fmt.Errorf("unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity) + if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity && c.AuthenticationMethod != authMethodSDK { + return fmt.Errorf("unknown authentication_type %q. Supported types are %q, %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity, authMethodSDK) } - return nil + return c.HTTPClientConfig.Validate() } type Discovery struct { *refresh.Discovery - logger log.Logger - cfg *SDConfig - port int + logger log.Logger + cfg *SDConfig + port int + cache *cache.Cache[string, *armnetwork.Interface] + metrics *azureMetrics } // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. -func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*azureMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } + l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) d := &Discovery{ - cfg: cfg, - port: cfg.Port, - logger: logger, + cfg: cfg, + port: cfg.Port, + logger: logger, + cache: l, + metrics: m, } + d.Discovery = refresh.NewDiscovery( - logger, - "azure", - time.Duration(cfg.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "azure", + Interval: time.Duration(cfg.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) - return d + + return d, nil +} + +type client interface { + getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) + getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) + getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) + getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) + getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) } // azureClient represents multiple Azure Resource Manager providers. type azureClient struct { - nic network.InterfacesClient - vm compute.VirtualMachinesClient - vmss compute.VirtualMachineScaleSetsClient - vmssvm compute.VirtualMachineScaleSetVMsClient + nic *armnetwork.InterfacesClient + vm *armcompute.VirtualMachinesClient + vmss *armcompute.VirtualMachineScaleSetsClient + vmssvm *armcompute.VirtualMachineScaleSetVMsClient + logger log.Logger } +var _ client = &azureClient{} + // createAzureClient is a helper function for creating an Azure compute client to ARM. 
-func createAzureClient(cfg SDConfig) (azureClient, error) { - env, err := azure.EnvironmentFromName(cfg.Environment) +func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { + cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment) if err != nil { - return azureClient{}, err + return &azureClient{}, err } - activeDirectoryEndpoint := env.ActiveDirectoryEndpoint - resourceManagerEndpoint := env.ResourceManagerEndpoint - var c azureClient + c.logger = logger - var spt *adal.ServicePrincipalToken - - switch cfg.AuthenticationMethod { - case authMethodManagedIdentity: - spt, err = adal.NewServicePrincipalTokenFromManagedIdentity(resourceManagerEndpoint, &adal.ManagedIdentityOptions{ClientID: cfg.ClientID}) - if err != nil { - return azureClient{}, err - } - case authMethodOAuth: - oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, cfg.TenantID) - if err != nil { - return azureClient{}, err - } + telemetry := policy.TelemetryOptions{ + ApplicationID: userAgent, + } - spt, err = adal.NewServicePrincipalToken(*oauthConfig, cfg.ClientID, string(cfg.ClientSecret), resourceManagerEndpoint) - if err != nil { - return azureClient{}, err - } + credential, err := newCredential(cfg, policy.ClientOptions{ + Cloud: cloudConfiguration, + Telemetry: telemetry, + }) + if err != nil { + return &azureClient{}, err } client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd") if err != nil { - return azureClient{}, err + return &azureClient{}, err + } + options := &arm.ClientOptions{ + ClientOptions: policy.ClientOptions{ + Transport: client, + Cloud: cloudConfiguration, + Telemetry: telemetry, + }, } - sender := autorest.DecorateSender(client) - preparer := autorest.WithUserAgent(userAgent) - - bearerAuthorizer := autorest.NewBearerAuthorizer(spt) - c.vm = compute.NewVirtualMachinesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) - c.vm.Authorizer = bearerAuthorizer - c.vm.Sender = sender - c.vm.RequestInspector = preparer + c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options) + if err != nil { + return &azureClient{}, err + } - c.nic = network.NewInterfacesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) - c.nic.Authorizer = bearerAuthorizer - c.nic.Sender = sender - c.nic.RequestInspector = preparer + c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options) + if err != nil { + return &azureClient{}, err + } - c.vmss = compute.NewVirtualMachineScaleSetsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) - c.vmss.Authorizer = bearerAuthorizer - c.vmss.Sender = sender - c.vmss.RequestInspector = preparer + c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options) + if err != nil { + return &azureClient{}, err + } - c.vmssvm = compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) - c.vmssvm.Authorizer = bearerAuthorizer - c.vmssvm.Sender = sender - c.vmssvm.RequestInspector = preparer + c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options) + if err != nil { + return &azureClient{}, err + } - return c, nil + return &c, nil } -// azureResource represents a resource identifier in Azure. 
-type azureResource struct { - Name string - ResourceGroup string +func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) { + var credential azcore.TokenCredential + switch cfg.AuthenticationMethod { + case authMethodManagedIdentity: + options := &azidentity.ManagedIdentityCredentialOptions{ClientOptions: policyClientOptions, ID: azidentity.ClientID(cfg.ClientID)} + managedIdentityCredential, err := azidentity.NewManagedIdentityCredential(options) + if err != nil { + return nil, err + } + credential = azcore.TokenCredential(managedIdentityCredential) + case authMethodOAuth: + options := &azidentity.ClientSecretCredentialOptions{ClientOptions: policyClientOptions} + secretCredential, err := azidentity.NewClientSecretCredential(cfg.TenantID, cfg.ClientID, string(cfg.ClientSecret), options) + if err != nil { + return nil, err + } + credential = azcore.TokenCredential(secretCredential) + case authMethodSDK: + options := &azidentity.DefaultAzureCredentialOptions{ClientOptions: policyClientOptions} + if len(cfg.TenantID) != 0 { + options.TenantID = cfg.TenantID + } + sdkCredential, err := azidentity.NewDefaultAzureCredential(options) + if err != nil { + return nil, err + } + credential = azcore.TokenCredential(sdkCredential) + } + return credential, nil } -// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS) +// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS). type virtualMachine struct { ID string Name string @@ -260,42 +330,38 @@ type virtualMachine struct { Location string OsType string ScaleSet string + InstanceID string Tags map[string]*string NetworkInterfaces []string Size string } // Create a new azureResource object from an ID string. -func newAzureResourceFromID(id string, logger log.Logger) (azureResource, error) { - // Resource IDs have the following format. - // /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME - // or if embedded resource then - // /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME/TYPE/NAME - s := strings.Split(id, "/") - if len(s) != 9 && len(s) != 11 { - err := fmt.Errorf("invalid ID '%s'. Refusing to create azureResource", id) +func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) { + if logger == nil { + logger = log.NewNopLogger() + } + resourceID, err := arm.ParseResourceID(id) + if err != nil { + err := fmt.Errorf("invalid ID '%s': %w", id, err) level.Error(logger).Log("err", err) - return azureResource{}, err + return &arm.ResourceID{}, err } - - return azureResource{ - Name: strings.ToLower(s[8]), - ResourceGroup: strings.ToLower(s[4]), - }, nil + return resourceID, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { defer level.Debug(d.logger).Log("msg", "Azure discovery completed") - client, err := createAzureClient(*d.cfg) + client, err := createAzureClient(*d.cfg, d.logger) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("could not create Azure client: %w", err) } machines, err := client.getVMs(ctx, d.cfg.ResourceGroup) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machines: %w", err) } @@ -304,14 +370,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { // Load the vms managed by scale sets. 
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err) } for _, scaleSet := range scaleSets { scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err) } machines = append(machines, scaleSetVms...) @@ -330,81 +396,8 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { for _, vm := range machines { go func(vm virtualMachine) { defer wg.Done() - r, err := newAzureResourceFromID(vm.ID, d.logger) - if err != nil { - ch <- target{labelSet: nil, err: err} - return - } - - labels := model.LabelSet{ - azureLabelSubscriptionID: model.LabelValue(d.cfg.SubscriptionID), - azureLabelTenantID: model.LabelValue(d.cfg.TenantID), - azureLabelMachineID: model.LabelValue(vm.ID), - azureLabelMachineName: model.LabelValue(vm.Name), - azureLabelMachineComputerName: model.LabelValue(vm.ComputerName), - azureLabelMachineOSType: model.LabelValue(vm.OsType), - azureLabelMachineLocation: model.LabelValue(vm.Location), - azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup), - azureLabelMachineSize: model.LabelValue(vm.Size), - } - - if vm.ScaleSet != "" { - labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet) - } - - for k, v := range vm.Tags { - name := strutil.SanitizeLabelName(k) - labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v) - } - - // Get the IP address information via separate call to the network provider. - for _, nicID := range vm.NetworkInterfaces { - networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID) - if err != nil { - if errors.Is(err, errorNotFound) { - level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) - } else { - ch <- target{labelSet: nil, err: err} - } - // Get out of this routine because we cannot continue without a network interface. - return - } - - if networkInterface.InterfacePropertiesFormat == nil { - continue - } - - // Unfortunately Azure does not return information on whether a VM is deallocated. - // This information is available via another API call however the Go SDK does not - // yet support this. On deallocated machines, this value happens to be nil so it - // is a cheap and easy way to determine if a machine is allocated or not. - if networkInterface.Primary == nil { - level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name) - return - } - - if *networkInterface.Primary { - for _, ip := range *networkInterface.IPConfigurations { - // IPAddress is a field defined in PublicIPAddressPropertiesFormat, - // therefore we need to validate that both are not nil. - if ip.PublicIPAddress != nil && ip.PublicIPAddress.PublicIPAddressPropertiesFormat != nil && ip.PublicIPAddress.IPAddress != nil { - labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.PublicIPAddress.IPAddress) - } - if ip.PrivateIPAddress != nil { - labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.PrivateIPAddress) - address := net.JoinHostPort(*ip.PrivateIPAddress, fmt.Sprintf("%d", d.port)) - labels[model.AddressLabel] = model.LabelValue(address) - ch <- target{labelSet: labels, err: nil} - return - } - // If we made it here, we don't have a private IP which should be impossible. 
- // Return an empty target and error to ensure an all or nothing situation. - err = fmt.Errorf("unable to find a private IP for VM %s", vm.Name) - ch <- target{labelSet: nil, err: err} - return - } - } - } + labelSet, err := d.vmToLabelSet(ctx, client, vm) + ch <- target{labelSet: labelSet, err: err} }(vm) } @@ -414,7 +407,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { var tg targetgroup.Group for tgt := range ch { if tgt.err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err) } if tgt.labelSet != nil { @@ -425,95 +418,175 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { return []*targetgroup.Group{&tg}, nil } -func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) { - var vms []virtualMachine - var result compute.VirtualMachineListResultPage - var err error - if len(resourceGroup) == 0 { - result, err = client.vm.ListAll(ctx) - } else { - result, err = client.vm.List(ctx, resourceGroup) - } +func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) { + r, err := newAzureResourceFromID(vm.ID, d.logger) if err != nil { - return nil, fmt.Errorf("could not list virtual machines: %w", err) + return nil, err } - for result.NotDone() { - for _, vm := range result.Values() { - vms = append(vms, mapFromVM(vm)) + + labels := model.LabelSet{ + azureLabelSubscriptionID: model.LabelValue(d.cfg.SubscriptionID), + azureLabelTenantID: model.LabelValue(d.cfg.TenantID), + azureLabelMachineID: model.LabelValue(vm.ID), + azureLabelMachineName: model.LabelValue(vm.Name), + azureLabelMachineComputerName: model.LabelValue(vm.ComputerName), + azureLabelMachineOSType: model.LabelValue(vm.OsType), + azureLabelMachineLocation: model.LabelValue(vm.Location), + azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName), + azureLabelMachineSize: model.LabelValue(vm.Size), + } + + if vm.ScaleSet != "" { + labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet) + } + + for k, v := range vm.Tags { + name := strutil.SanitizeLabelName(k) + labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v) + } + + // Get the IP address information via separate call to the network provider. + for _, nicID := range vm.NetworkInterfaces { + var networkInterface *armnetwork.Interface + if v, ok := d.getFromCache(nicID); ok { + networkInterface = v + d.metrics.cacheHitCount.Add(1) + } else { + if vm.ScaleSet == "" { + networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID) + } else { + networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID) + } + if err != nil { + if errors.Is(err, errorNotFound) { + level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) + } else { + return nil, err + } + // Get out of this routine because we cannot continue without a network interface. + return nil, nil + } + + // Continue processing with the network interface + d.addToCache(nicID, networkInterface) } - err = result.NextWithContext(ctx) - if err != nil { - return nil, fmt.Errorf("could not list virtual machines: %w", err) + + if networkInterface.Properties == nil { + continue } - } - return vms, nil -} + // Unfortunately Azure does not return information on whether a VM is deallocated. 
+ // This information is available via another API call however the Go SDK does not + // yet support this. On deallocated machines, this value happens to be nil so it + // is a cheap and easy way to determine if a machine is allocated or not. + if networkInterface.Properties.Primary == nil { + level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name) + return nil, nil + } -type VmssListResultPage interface { - NextWithContext(ctx context.Context) (err error) - NotDone() bool - Values() []compute.VirtualMachineScaleSet + if *networkInterface.Properties.Primary { + for _, ip := range networkInterface.Properties.IPConfigurations { + // IPAddress is a field defined in PublicIPAddressPropertiesFormat, + // therefore we need to validate that both are not nil. + if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil { + labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress) + } + if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil { + labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress) + address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port)) + labels[model.AddressLabel] = model.LabelValue(address) + return labels, nil + } + // If we made it here, we don't have a private IP which should be impossible. + // Return an empty target and error to ensure an all or nothing situation. + return nil, fmt.Errorf("unable to find a private IP for VM %s", vm.Name) + } + } + } + // TODO: Should we say something at this point? + return nil, nil } -func (client *azureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]compute.VirtualMachineScaleSet, error) { - var scaleSets []compute.VirtualMachineScaleSet - var result VmssListResultPage - var err error +func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) { + var vms []virtualMachine if len(resourceGroup) == 0 { - var rtn compute.VirtualMachineScaleSetListWithLinkResultPage - rtn, err = client.vmss.ListAll(ctx) - if err != nil { - return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err) + pager := client.vm.NewListAllPager(nil) + for pager.More() { + nextResult, err := pager.NextPage(ctx) + if err != nil { + return nil, fmt.Errorf("could not list virtual machines: %w", err) + } + for _, vm := range nextResult.Value { + vms = append(vms, mapFromVM(*vm)) + } } - result = &rtn } else { - var rtn compute.VirtualMachineScaleSetListResultPage - rtn, err = client.vmss.List(ctx, resourceGroup) - if err != nil { - return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err) + pager := client.vm.NewListPager(resourceGroup, nil) + for pager.More() { + nextResult, err := pager.NextPage(ctx) + if err != nil { + return nil, fmt.Errorf("could not list virtual machines: %w", err) + } + for _, vm := range nextResult.Value { + vms = append(vms, mapFromVM(*vm)) + } } - result = &rtn } + return vms, nil +} - for result.NotDone() { - scaleSets = append(scaleSets, result.Values()...) 
- err = result.NextWithContext(ctx) - if err != nil { - return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err) +func (client *azureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) { + var scaleSets []armcompute.VirtualMachineScaleSet + if len(resourceGroup) == 0 { + pager := client.vmss.NewListAllPager(nil) + for pager.More() { + nextResult, err := pager.NextPage(ctx) + if err != nil { + return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err) + } + for _, vmss := range nextResult.Value { + scaleSets = append(scaleSets, *vmss) + } + } + } else { + pager := client.vmss.NewListPager(resourceGroup, nil) + for pager.More() { + nextResult, err := pager.NextPage(ctx) + if err != nil { + return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err) + } + for _, vmss := range nextResult.Value { + scaleSets = append(scaleSets, *vmss) + } } } - return scaleSets, nil } -func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) { +func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) { var vms []virtualMachine // TODO do we really need to fetch the resourcegroup this way? - r, err := newAzureResourceFromID(*scaleSet.ID, nil) + r, err := newAzureResourceFromID(*scaleSet.ID, client.logger) if err != nil { return nil, fmt.Errorf("could not parse scale set ID: %w", err) } - result, err := client.vmssvm.List(ctx, r.ResourceGroup, *(scaleSet.Name), "", "", "") - if err != nil { - return nil, fmt.Errorf("could not list virtual machine scale set vms: %w", err) - } - for result.NotDone() { - for _, vm := range result.Values() { - vms = append(vms, mapFromVMScaleSetVM(vm, *scaleSet.Name)) - } - err = result.NextWithContext(ctx) + pager := client.vmssvm.NewListPager(r.ResourceGroupName, *(scaleSet.Name), nil) + for pager.More() { + nextResult, err := pager.NextPage(ctx) if err != nil { return nil, fmt.Errorf("could not list virtual machine scale set vms: %w", err) } + for _, vmssvm := range nextResult.Value { + vms = append(vms, mapFromVMScaleSetVM(*vmssvm, *scaleSet.Name)) + } } return vms, nil } -func mapFromVM(vm compute.VirtualMachine) virtualMachine { - osType := string(vm.StorageProfile.OsDisk.OsType) +func mapFromVM(vm armcompute.VirtualMachine) virtualMachine { + var osType string tags := map[string]*string{} networkInterfaces := []string{} var computerName string @@ -523,18 +596,23 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { tags = vm.Tags } - if vm.NetworkProfile != nil { - for _, vmNIC := range *(vm.NetworkProfile.NetworkInterfaces) { - networkInterfaces = append(networkInterfaces, *vmNIC.ID) + if vm.Properties != nil { + if vm.Properties.StorageProfile != nil && + vm.Properties.StorageProfile.OSDisk != nil && + vm.Properties.StorageProfile.OSDisk.OSType != nil { + osType = string(*vm.Properties.StorageProfile.OSDisk.OSType) } - } - if vm.VirtualMachineProperties != nil { - if vm.VirtualMachineProperties.OsProfile != nil && vm.VirtualMachineProperties.OsProfile.ComputerName != nil { - computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName) + if vm.Properties.NetworkProfile != nil { + for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces { + networkInterfaces = append(networkInterfaces, *vmNIC.ID) + } + } + if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil { + 
computerName = *(vm.Properties.OSProfile.ComputerName) } - if vm.VirtualMachineProperties.HardwareProfile != nil { - size = string(vm.VirtualMachineProperties.HardwareProfile.VMSize) + if vm.Properties.HardwareProfile != nil { + size = string(*vm.Properties.HardwareProfile.VMSize) } } @@ -552,8 +630,8 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { } } -func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine { - osType := string(vm.StorageProfile.OsDisk.OsType) +func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine { + var osType string tags := map[string]*string{} networkInterfaces := []string{} var computerName string @@ -563,18 +641,23 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin tags = vm.Tags } - if vm.NetworkProfile != nil { - for _, vmNIC := range *(vm.NetworkProfile.NetworkInterfaces) { - networkInterfaces = append(networkInterfaces, *vmNIC.ID) + if vm.Properties != nil { + if vm.Properties.StorageProfile != nil && + vm.Properties.StorageProfile.OSDisk != nil && + vm.Properties.StorageProfile.OSDisk.OSType != nil { + osType = string(*vm.Properties.StorageProfile.OSDisk.OSType) } - } - if vm.VirtualMachineScaleSetVMProperties != nil { - if vm.VirtualMachineScaleSetVMProperties.OsProfile != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName != nil { - computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName) + if vm.Properties.NetworkProfile != nil { + for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces { + networkInterfaces = append(networkInterfaces, *vmNIC.ID) + } } - if vm.VirtualMachineScaleSetVMProperties.HardwareProfile != nil { - size = string(vm.VirtualMachineScaleSetVMProperties.HardwareProfile.VMSize) + if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil { + computerName = *(vm.Properties.OSProfile.ComputerName) + } + if vm.Properties.HardwareProfile != nil { + size = string(*vm.Properties.HardwareProfile.VMSize) } } @@ -586,6 +669,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin Location: *(vm.Location), OsType: osType, ScaleSet: scaleSetName, + InstanceID: *(vm.InstanceID), Tags: tags, NetworkInterfaces: networkInterfaces, Size: size, @@ -594,38 +678,58 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin var errorNotFound = errors.New("network interface does not exist") -// getNetworkInterfaceByID gets the network interface. +// getVMNetworkInterfaceByID gets the network interface. // If a 404 is returned from the Azure API, `errorNotFound` is returned. -// On all other errors, an autorest.DetailedError is returned. 
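
// Note (not part of the patch): getVMs, getScaleSets, and getScaleSetVMs all
// repeat the same More/NextPage loop. Under Go generics the pattern could be
// hoisted into a single helper; a sketch only, assuming runtime is
// github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime:
//
//	func collectPages[P, T any](ctx context.Context, pager *runtime.Pager[P], extract func(P) []T) ([]T, error) {
//		var out []T
//		for pager.More() {
//			page, err := pager.NextPage(ctx) // one service round trip per page
//			if err != nil {
//				return nil, err
//			}
//			out = append(out, extract(page)...)
//		}
//		return out, nil
//	}
//
// The pointer-heavy arm* models similarly invite a nil-safe accessor
// (hypothetical helper, not in the SDK):
//
//	func deref[T any](p *T) (v T) {
//		if p != nil {
//			v = *p
//		}
//		return
//	}
//
// so that, for example, the VMSize dereference in mapFromVM above could not
// panic on a nil pointer: size = string(deref(vm.Properties.HardwareProfile.VMSize)).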
-func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*network.Interface, error) {
-	result := network.Interface{}
-	queryParameters := map[string]interface{}{
-		"api-version": "2018-10-01",
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.nic.BaseURI),
-		autorest.WithPath(networkInterfaceID),
-		autorest.WithQueryParameters(queryParameters),
-		autorest.WithUserAgent(userAgent))
-	req, err := preparer.Prepare((&http.Request{}).WithContext(ctx))
+func (client *azureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
+	r, err := newAzureResourceFromID(networkInterfaceID, client.logger)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse network interface ID: %w", err)
+	}
+
+	resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, &armnetwork.InterfacesClientGetOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")})
 	if err != nil {
-		return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")
+		var responseError *azcore.ResponseError
+		if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound {
+			return nil, errorNotFound
+		}
+		return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
 	}
-	resp, err := client.nic.GetSender(req)
+	return &resp.Interface, nil
+}
+
+// getVMScaleSetVMNetworkInterfaceByID gets the network interface.
+// If a 404 is returned from the Azure API, `errorNotFound` is returned.
+func (client *azureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
+	r, err := newAzureResourceFromID(networkInterfaceID, client.logger)
 	if err != nil {
-		return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request")
+		return nil, fmt.Errorf("could not parse network interface ID: %w", err)
 	}
 
-	result, err = client.nic.GetResponder(resp)
+	resp, err := client.nic.GetVirtualMachineScaleSetNetworkInterface(ctx, r.ResourceGroupName, scaleSetName, instanceID, r.Name, &armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")})
 	if err != nil {
-		if resp.StatusCode == http.StatusNotFound {
+		var responseError *azcore.ResponseError
+		if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound {
 			return nil, errorNotFound
 		}
-		return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request")
+		return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
 	}
-	return &result, nil
+	return &resp.Interface, nil
+}
+
+// addToCache will add the network interface information for the specified nicID.
+func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) {
+	random := rand.Int63n(int64(time.Duration(d.cfg.RefreshInterval * 3).Seconds()))
+	rs := time.Duration(random) * time.Second
+	exptime := time.Duration(d.cfg.RefreshInterval*10) + rs
+	d.cache.Set(nicID, netInt, cache.WithExpiration(exptime))
+	level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds())
+}
+
+// getFromCache will get the network interface for the specified nicID.
+// If the cache is disabled, nothing will happen.
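
// Note (not part of the patch) on the expiry computed in addToCache above:
// the TTL is ten refresh intervals plus up to three intervals of random
// jitter, so cached NICs do not all expire in lockstep. A worked example,
// assuming the documented 5m default for refresh_interval:
//
//	refresh := 5 * time.Minute
//	jitter := time.Duration(rand.Int63n(int64((3 * refresh).Seconds()))) * time.Second
//	ttl := 10*refresh + jitter // uniformly distributed in [50m, 65m)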
+func (d *Discovery) getFromCache(nicID string) (*armnetwork.Interface, bool) { + net, found := d.cache.Get(nicID) + return net, found } diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 179b97ba61..32dab66c8c 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -14,49 +14,60 @@ package azure import ( + "context" + "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + cache "github.com/Code-Hex/go-generics-cache" + "github.com/Code-Hex/go-generics-cache/policy/lru" + "github.com/go-kit/log" "github.com/stretchr/testify/require" "go.uber.org/goleak" ) func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) + goleak.VerifyTestMain(m, + goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"), + ) } func TestMapFromVMWithEmptyTags(t *testing.T) { id := "test" name := "name" size := "size" + vmSize := armcompute.VirtualMachineSizeTypes(size) + osType := armcompute.OperatingSystemTypesLinux vmType := "type" location := "westeurope" computerName := "computer_name" - networkProfile := compute.NetworkProfile{ - NetworkInterfaces: &[]compute.NetworkInterfaceReference{}, + networkProfile := armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } - properties := &compute.VirtualMachineProperties{ - OsProfile: &compute.OSProfile{ + properties := &armcompute.VirtualMachineProperties{ + OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, - StorageProfile: &compute.StorageProfile{ - OsDisk: &compute.OSDisk{ - OsType: "Linux", + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, }, }, NetworkProfile: &networkProfile, - HardwareProfile: &compute.HardwareProfile{ - VMSize: compute.VirtualMachineSizeTypes(size), + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, }, } - testVM := compute.VirtualMachine{ - ID: &id, - Name: &name, - Type: &vmType, - Location: &location, - Tags: nil, - VirtualMachineProperties: properties, + testVM := armcompute.VirtualMachine{ + ID: &id, + Name: &name, + Type: &vmType, + Location: &location, + Tags: nil, + Properties: properties, } expectedVM := virtualMachine{ @@ -76,41 +87,177 @@ func TestMapFromVMWithEmptyTags(t *testing.T) { require.Equal(t, expectedVM, actualVM) } +func TestVMToLabelSet(t *testing.T) { + id := "/subscriptions/00000000-0000-0000-0000-000000000000/test" + name := "name" + size := "size" + vmSize := armcompute.VirtualMachineSizeTypes(size) + osType := armcompute.OperatingSystemTypesLinux + vmType := "type" + location := "westeurope" + computerName := "computer_name" + networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1" + ipAddress := "10.20.30.40" + primary := true + networkProfile := armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + { + ID: &networkID, + Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary}, + }, + }, + } + properties := &armcompute.VirtualMachineProperties{ + OSProfile: &armcompute.OSProfile{ + ComputerName: &computerName, + }, + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, + }, + }, + NetworkProfile: &networkProfile, + HardwareProfile: 
&armcompute.HardwareProfile{ + VMSize: &vmSize, + }, + } + + testVM := armcompute.VirtualMachine{ + ID: &id, + Name: &name, + Type: &vmType, + Location: &location, + Tags: nil, + Properties: properties, + } + + expectedVM := virtualMachine{ + ID: id, + Name: name, + ComputerName: computerName, + Type: vmType, + Location: location, + OsType: "Linux", + Tags: map[string]*string{}, + NetworkInterfaces: []string{networkID}, + Size: size, + } + + actualVM := mapFromVM(testVM) + + require.Equal(t, expectedVM, actualVM) + + cfg := DefaultSDConfig + d := &Discovery{ + cfg: &cfg, + logger: log.NewNopLogger(), + cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))), + } + network := armnetwork.Interface{ + Name: &networkID, + Properties: &armnetwork.InterfacePropertiesFormat{ + Primary: &primary, + IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ + {Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ + PrivateIPAddress: &ipAddress, + }}, + }, + }, + } + client := &mockAzureClient{ + networkInterface: &network, + } + labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM) + require.NoError(t, err) + require.Len(t, labelSet, 11) +} + +func TestMapFromVMWithEmptyOSType(t *testing.T) { + id := "test" + name := "name" + size := "size" + vmSize := armcompute.VirtualMachineSizeTypes(size) + vmType := "type" + location := "westeurope" + computerName := "computer_name" + networkProfile := armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, + } + properties := &armcompute.VirtualMachineProperties{ + OSProfile: &armcompute.OSProfile{ + ComputerName: &computerName, + }, + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{}, + }, + NetworkProfile: &networkProfile, + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, + }, + } + + testVM := armcompute.VirtualMachine{ + ID: &id, + Name: &name, + Type: &vmType, + Location: &location, + Tags: nil, + Properties: properties, + } + + expectedVM := virtualMachine{ + ID: id, + Name: name, + ComputerName: computerName, + Type: vmType, + Location: location, + Tags: map[string]*string{}, + NetworkInterfaces: []string{}, + Size: size, + } + + actualVM := mapFromVM(testVM) + + require.Equal(t, expectedVM, actualVM) +} + func TestMapFromVMWithTags(t *testing.T) { id := "test" name := "name" size := "size" + vmSize := armcompute.VirtualMachineSizeTypes(size) + osType := armcompute.OperatingSystemTypesLinux vmType := "type" location := "westeurope" computerName := "computer_name" tags := map[string]*string{ "prometheus": new(string), } - networkProfile := compute.NetworkProfile{ - NetworkInterfaces: &[]compute.NetworkInterfaceReference{}, + networkProfile := armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } - properties := &compute.VirtualMachineProperties{ - OsProfile: &compute.OSProfile{ + properties := &armcompute.VirtualMachineProperties{ + OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, - StorageProfile: &compute.StorageProfile{ - OsDisk: &compute.OSDisk{ - OsType: "Linux", + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, }, }, NetworkProfile: &networkProfile, - HardwareProfile: &compute.HardwareProfile{ - VMSize: compute.VirtualMachineSizeTypes(size), + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, }, } - testVM := compute.VirtualMachine{ - ID: &id, - Name: &name, - Type: &vmType, - Location: &location, - 
Tags: tags, - VirtualMachineProperties: properties, + testVM := armcompute.VirtualMachine{ + ID: &id, + Name: &name, + Type: &vmType, + Location: &location, + Tags: tags, + Properties: properties, } expectedVM := virtualMachine{ @@ -134,34 +281,38 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { id := "test" name := "name" size := "size" + vmSize := armcompute.VirtualMachineSizeTypes(size) + osType := armcompute.OperatingSystemTypesLinux vmType := "type" + instanceID := "123" location := "westeurope" computerName := "computer_name" - networkProfile := compute.NetworkProfile{ - NetworkInterfaces: &[]compute.NetworkInterfaceReference{}, + networkProfile := armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } - properties := &compute.VirtualMachineScaleSetVMProperties{ - OsProfile: &compute.OSProfile{ + properties := &armcompute.VirtualMachineScaleSetVMProperties{ + OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, - StorageProfile: &compute.StorageProfile{ - OsDisk: &compute.OSDisk{ - OsType: "Linux", + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, }, }, NetworkProfile: &networkProfile, - HardwareProfile: &compute.HardwareProfile{ - VMSize: compute.VirtualMachineSizeTypes(size), + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, }, } - testVM := compute.VirtualMachineScaleSetVM{ - ID: &id, - Name: &name, - Type: &vmType, - Location: &location, - Tags: nil, - VirtualMachineScaleSetVMProperties: properties, + testVM := armcompute.VirtualMachineScaleSetVM{ + ID: &id, + Name: &name, + Type: &vmType, + InstanceID: &instanceID, + Location: &location, + Tags: nil, + Properties: properties, } scaleSet := "testSet" @@ -175,6 +326,59 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { Tags: map[string]*string{}, NetworkInterfaces: []string{}, ScaleSet: scaleSet, + InstanceID: instanceID, + Size: size, + } + + actualVM := mapFromVMScaleSetVM(testVM, scaleSet) + + require.Equal(t, expectedVM, actualVM) +} + +func TestMapFromVMScaleSetVMWithEmptyOSType(t *testing.T) { + id := "test" + name := "name" + size := "size" + vmSize := armcompute.VirtualMachineSizeTypes(size) + vmType := "type" + instanceID := "123" + location := "westeurope" + computerName := "computer_name" + networkProfile := armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, + } + properties := &armcompute.VirtualMachineScaleSetVMProperties{ + OSProfile: &armcompute.OSProfile{ + ComputerName: &computerName, + }, + StorageProfile: &armcompute.StorageProfile{}, + NetworkProfile: &networkProfile, + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, + }, + } + + testVM := armcompute.VirtualMachineScaleSetVM{ + ID: &id, + Name: &name, + Type: &vmType, + InstanceID: &instanceID, + Location: &location, + Tags: nil, + Properties: properties, + } + + scaleSet := "testSet" + expectedVM := virtualMachine{ + ID: id, + Name: name, + ComputerName: computerName, + Type: vmType, + Location: location, + Tags: map[string]*string{}, + NetworkInterfaces: []string{}, + ScaleSet: scaleSet, + InstanceID: instanceID, Size: size, } @@ -187,37 +391,41 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { id := "test" name := "name" size := "size" + vmSize := armcompute.VirtualMachineSizeTypes(size) + osType := armcompute.OperatingSystemTypesLinux vmType := "type" + instanceID := "123" location := "westeurope" computerName := "computer_name" tags := map[string]*string{ 
"prometheus": new(string), } - networkProfile := compute.NetworkProfile{ - NetworkInterfaces: &[]compute.NetworkInterfaceReference{}, + networkProfile := armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{}, } - properties := &compute.VirtualMachineScaleSetVMProperties{ - OsProfile: &compute.OSProfile{ + properties := &armcompute.VirtualMachineScaleSetVMProperties{ + OSProfile: &armcompute.OSProfile{ ComputerName: &computerName, }, - StorageProfile: &compute.StorageProfile{ - OsDisk: &compute.OSDisk{ - OsType: "Linux", + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, }, }, NetworkProfile: &networkProfile, - HardwareProfile: &compute.HardwareProfile{ - VMSize: compute.VirtualMachineSizeTypes(size), + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, }, } - testVM := compute.VirtualMachineScaleSetVM{ - ID: &id, - Name: &name, - Type: &vmType, - Location: &location, - Tags: tags, - VirtualMachineScaleSetVMProperties: properties, + testVM := armcompute.VirtualMachineScaleSetVM{ + ID: &id, + Name: &name, + Type: &vmType, + InstanceID: &instanceID, + Location: &location, + Tags: tags, + Properties: properties, } scaleSet := "testSet" @@ -231,6 +439,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { Tags: tags, NetworkInterfaces: []string{}, ScaleSet: scaleSet, + InstanceID: instanceID, Size: size, } @@ -242,18 +451,58 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { func TestNewAzureResourceFromID(t *testing.T) { for _, tc := range []struct { id string - expected azureResource + expected *arm.ResourceID }{ { - id: "/a/b/c/group/d/e/f/name", - expected: azureResource{"name", "group"}, + id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name", + expected: &arm.ResourceID{ + Name: "name", + ResourceGroupName: "group", + }, }, { - id: "/a/b/c/group/d/e/f/name/g/h", - expected: azureResource{"name", "group"}, + id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name/TYPE/h", + expected: &arm.ResourceID{ + Name: "h", + ResourceGroupName: "group", + }, }, } { - actual, _ := newAzureResourceFromID(tc.id, nil) - require.Equal(t, tc.expected, actual) + actual, err := newAzureResourceFromID(tc.id, nil) + require.NoError(t, err) + require.Equal(t, tc.expected.Name, actual.Name) + require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName) + } +} + +type mockAzureClient struct { + networkInterface *armnetwork.Interface +} + +var _ client = &mockAzureClient{} + +func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) { + return nil, nil +} + +func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) { + return nil, nil +} + +func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) { + return nil, nil +} + +func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) { + if networkInterfaceID == "" { + return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty") + } + return m.networkInterface, nil +} + +func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) { + if scaleSetName == "" { + return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be 
empty") } + return m.networkInterface, nil } diff --git a/discovery/azure/metrics.go b/discovery/azure/metrics.go new file mode 100644 index 0000000000..3e3dbdbfbb --- /dev/null +++ b/discovery/azure/metrics.go @@ -0,0 +1,64 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*azureMetrics)(nil) + +type azureMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator + + failuresCount prometheus.Counter + cacheHitCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &azureMetrics{ + refreshMetrics: rmi, + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_azure_failures_total", + Help: "Number of Azure service discovery refresh failures.", + }), + cacheHitCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_azure_cache_hit_total", + Help: "Number of cache hit during refresh.", + }), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.failuresCount, + m.cacheHitCount, + }) + + return m +} + +// Register implements discovery.DiscovererMetrics. +func (m *azureMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *azureMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 99ea396b99..bdc1fc8dce 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -50,7 +50,7 @@ const ( tagsLabel = model.MetaLabelPrefix + "consul_tags" // serviceLabel is the name of the label containing the service name. serviceLabel = model.MetaLabelPrefix + "consul_service" - // healthLabel is the name of the label containing the health of the service instance + // healthLabel is the name of the label containing the health of the service instance. healthLabel = model.MetaLabelPrefix + "consul_health" // serviceAddressLabel is the name of the label containing the (optional) service address. serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" @@ -71,41 +71,18 @@ const ( namespace = "prometheus" ) -var ( - rpcFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_consul_rpc_failures_total", - Help: "The number of Consul RPC call failures.", - }) - rpcDuration = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Namespace: namespace, - Name: "sd_consul_rpc_duration_seconds", - Help: "The duration of a Consul RPC call in seconds.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{"endpoint", "call"}, - ) - - // Initialize metric vectors. 
- servicesRPCDuration = rpcDuration.WithLabelValues("catalog", "services") - serviceRPCDuration = rpcDuration.WithLabelValues("catalog", "service") - - // DefaultSDConfig is the default Consul SD configuration. - DefaultSDConfig = SDConfig{ - TagSeparator: ",", - Scheme: "http", - Server: "localhost:8500", - AllowStale: true, - RefreshInterval: model.Duration(30 * time.Second), - HTTPClientConfig: config.DefaultHTTPClientConfig, - } -) +// DefaultSDConfig is the default Consul SD configuration. +var DefaultSDConfig = SDConfig{ + TagSeparator: ",", + Scheme: "http", + Server: "localhost:8500", + AllowStale: true, + RefreshInterval: model.Duration(30 * time.Second), + HTTPClientConfig: config.DefaultHTTPClientConfig, +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(rpcFailuresCount, rpcDuration) } // SDConfig is the configuration for Consul service discovery. @@ -142,12 +119,17 @@ type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "consul" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -196,10 +178,16 @@ type Discovery struct { refreshInterval time.Duration finalizer func() logger log.Logger + metrics *consulMetrics } // NewDiscovery returns a new Discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*consulMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } @@ -237,7 +225,9 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { clientPartition: conf.Partition, finalizer: wrapper.CloseIdleConnections, logger: logger, + metrics: m, } + return cd, nil } @@ -293,7 +283,7 @@ func (d *Discovery) getDatacenter() error { info, err := d.client.Agent().Self() if err != nil { level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) - rpcFailuresCount.Inc() + d.metrics.rpcFailuresCount.Inc() return err } @@ -382,7 +372,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. t0 := time.Now() srvs, meta, err := catalog.Services(opts.WithContext(ctx)) elapsed := time.Since(t0) - servicesRPCDuration.Observe(elapsed.Seconds()) + d.metrics.servicesRPCDuration.Observe(elapsed.Seconds()) // Check the context before in order to exit early. select { @@ -393,7 +383,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. if err != nil { level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err) - rpcFailuresCount.Inc() + d.metrics.rpcFailuresCount.Inc() time.Sleep(retryInterval) return } @@ -449,13 +439,15 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. // consulService contains data belonging to the same service. 
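
// Note (not part of the patch): the package-level prometheus.MustRegister
// removed above tied these metrics to the global default registry and made
// them shared state across every Consul SD instance. A registry panics if
// the same collector is registered twice, which is why per-manager
// registries needed the metrics moved behind Config.NewDiscovererMetrics.
// A minimal illustration of the failure mode the refactor avoids:
//
//	reg := prometheus.NewRegistry()
//	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "x_total"})
//	reg.MustRegister(c)
//	reg.MustRegister(c) // panics: duplicate metrics collector registration attempted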
type consulService struct { - name string - tags []string - labels model.LabelSet - discovery *Discovery - client *consul.Client - tagSeparator string - logger log.Logger + name string + tags []string + labels model.LabelSet + discovery *Discovery + client *consul.Client + tagSeparator string + logger log.Logger + rpcFailuresCount prometheus.Counter + serviceRPCDuration prometheus.Observer } // Start watching a service. @@ -469,8 +461,10 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G serviceLabel: model.LabelValue(name), datacenterLabel: model.LabelValue(d.clientDatacenter), }, - tagSeparator: d.tagSeparator, - logger: d.logger, + tagSeparator: d.tagSeparator, + logger: d.logger, + rpcFailuresCount: d.metrics.rpcFailuresCount, + serviceRPCDuration: d.metrics.serviceRPCDuration, } go func() { @@ -508,7 +502,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr t0 := time.Now() serviceNodes, meta, err := health.ServiceMultipleTags(srv.name, srv.tags, false, opts.WithContext(ctx)) elapsed := time.Since(t0) - serviceRPCDuration.Observe(elapsed.Seconds()) + srv.serviceRPCDuration.Observe(elapsed.Seconds()) // Check the context before in order to exit early. select { @@ -520,7 +514,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr if err != nil { level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) - rpcFailuresCount.Inc() + srv.rpcFailuresCount.Inc() time.Sleep(retryInterval) return } @@ -545,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr // since the service may be registered remotely through a different node. var addr string if serviceNode.Service.Address != "" { - addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port)) + addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port)) } else { - addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port)) + addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port)) } labels := model.LabelSet{ diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index c929601638..e3bc7938f5 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -22,12 +22,14 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -35,20 +37,30 @@ func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } +// TODO: Add ability to unregister metrics? 
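
// Note (not part of the patch): the helper defined next centralizes metrics
// setup for these tests; each test body then reduces to the two lines used
// throughout the hunks below:
//
//	metrics := NewTestMetrics(t, conf, prometheus.NewRegistry())
//	consulDiscovery, err := NewDiscovery(conf, nil, metrics)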
+func NewTestMetrics(t *testing.T, conf discovery.Config, reg prometheus.Registerer) discovery.DiscovererMetrics { + refreshMetrics := discovery.NewRefreshMetrics(reg) + require.NoError(t, refreshMetrics.Register()) + + metrics := conf.NewDiscovererMetrics(prometheus.NewRegistry(), refreshMetrics) + require.NoError(t, metrics.Register()) + + return metrics +} + func TestConfiguredService(t *testing.T) { conf := &SDConfig{ Services: []string{"configuredServiceName"}, } - consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { - t.Errorf("Unexpected error when initializing discovery %v", err) - } - if !consulDiscovery.shouldWatch("configuredServiceName", []string{""}) { - t.Errorf("Expected service %s to be watched", "configuredServiceName") - } - if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) { - t.Errorf("Expected service %s to not be watched", "nonConfiguredServiceName") - } + + metrics := NewTestMetrics(t, conf, prometheus.NewRegistry()) + + consulDiscovery, err := NewDiscovery(conf, nil, metrics) + require.NoError(t, err, "when initializing discovery") + require.True(t, consulDiscovery.shouldWatch("configuredServiceName", []string{""}), + "Expected service %s to be watched", "configuredServiceName") + require.False(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}), + "Expected service %s to not be watched", "nonConfiguredServiceName") } func TestConfiguredServiceWithTag(t *testing.T) { @@ -56,22 +68,22 @@ func TestConfiguredServiceWithTag(t *testing.T) { Services: []string{"configuredServiceName"}, ServiceTags: []string{"http"}, } - consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { - t.Errorf("Unexpected error when initializing discovery %v", err) - } - if consulDiscovery.shouldWatch("configuredServiceName", []string{""}) { - t.Errorf("Expected service %s to not be watched without tag", "configuredServiceName") - } - if !consulDiscovery.shouldWatch("configuredServiceName", []string{"http"}) { - t.Errorf("Expected service %s to be watched with tag %s", "configuredServiceName", "http") - } - if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) { - t.Errorf("Expected service %s to not be watched without tag", "nonConfiguredServiceName") - } - if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{"http"}) { - t.Errorf("Expected service %s to not be watched with tag %s", "nonConfiguredServiceName", "http") - } + + metrics := NewTestMetrics(t, conf, prometheus.NewRegistry()) + + consulDiscovery, err := NewDiscovery(conf, nil, metrics) + require.NoError(t, err, "when initializing discovery") + require.False(t, consulDiscovery.shouldWatch("configuredServiceName", []string{""}), + "Expected service %s to not be watched without tag", "configuredServiceName") + + require.True(t, consulDiscovery.shouldWatch("configuredServiceName", []string{"http"}), + "Expected service %s to be watched with tag %s", "configuredServiceName", "http") + + require.False(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}), + "Expected service %s to not be watched without tag", "nonConfiguredServiceName") + + require.False(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{"http"}), + "Expected service %s to not be watched with tag %s", "nonConfiguredServiceName", "http") } func TestConfiguredServiceWithTags(t *testing.T) { @@ -151,27 +163,24 @@ func TestConfiguredServiceWithTags(t *testing.T) { } for _, tc := range cases { - consulDiscovery, err := NewDiscovery(tc.conf, nil) 
- if err != nil { - t.Errorf("Unexpected error when initializing discovery %v", err) - } - ret := consulDiscovery.shouldWatch(tc.serviceName, tc.serviceTags) - if ret != tc.shouldWatch { - t.Errorf("Expected should watch? %t, got %t. Watched service and tags: %s %+v, input was %s %+v", tc.shouldWatch, ret, tc.conf.Services, tc.conf.ServiceTags, tc.serviceName, tc.serviceTags) - } + metrics := NewTestMetrics(t, tc.conf, prometheus.NewRegistry()) + consulDiscovery, err := NewDiscovery(tc.conf, nil, metrics) + require.NoError(t, err, "when initializing discovery") + ret := consulDiscovery.shouldWatch(tc.serviceName, tc.serviceTags) + require.Equal(t, tc.shouldWatch, ret, "Watched service and tags: %s %+v, input was %s %+v", + tc.conf.Services, tc.conf.ServiceTags, tc.serviceName, tc.serviceTags) } } func TestNonConfiguredService(t *testing.T) { conf := &SDConfig{} - consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { - t.Errorf("Unexpected error when initializing discovery %v", err) - } - if !consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) { - t.Errorf("Expected service %s to be watched", "nonConfiguredServiceName") - } + + metrics := NewTestMetrics(t, conf, prometheus.NewRegistry()) + + consulDiscovery, err := NewDiscovery(conf, nil, metrics) + require.NoError(t, err, "when initializing discovery") + require.True(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}), "Expected service %s to be watched", "nonConfiguredServiceName") } const ( @@ -262,19 +271,22 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { func newDiscovery(t *testing.T, config *SDConfig) *Discovery { logger := log.NewNopLogger() - d, err := NewDiscovery(config, logger) + + metrics := NewTestMetrics(t, config, prometheus.NewRegistry()) + + d, err := NewDiscovery(config, logger, metrics) require.NoError(t, err) return d } func checkOneTarget(t *testing.T, tg []*targetgroup.Group) { - require.Equal(t, 1, len(tg)) + require.Len(t, tg, 1) target := tg[0] require.Equal(t, "test-dc", string(target.Labels["__meta_consul_dc"])) require.Equal(t, target.Source, string(target.Labels["__meta_consul_service"])) if target.Source == "test" { // test service should have one node. - require.Greater(t, len(target.Targets), 0, "Test service should have one node") + require.NotEmpty(t, target.Targets, "Test service should have one node") } } @@ -313,7 +325,7 @@ func TestNoTargets(t *testing.T) { }() targets := (<-ch)[0].Targets - require.Equal(t, 0, len(targets)) + require.Empty(t, targets) cancel() <-ch } @@ -476,15 +488,12 @@ oauth2: var config SDConfig err := config.UnmarshalYAML(unmarshal([]byte(test.config))) if err != nil { - require.Equalf(t, err.Error(), test.errMessage, "Expected error '%s', got '%v'", test.errMessage, err) - return - } - if test.errMessage != "" { - t.Errorf("Expected error %s, got none", test.errMessage) + require.EqualError(t, err, test.errMessage) return } + require.Empty(t, test.errMessage, "Expected error.") - require.Equal(t, config, test.expected) + require.Equal(t, test.expected, config) }) } } diff --git a/discovery/consul/metrics.go b/discovery/consul/metrics.go new file mode 100644 index 0000000000..8266e7cc60 --- /dev/null +++ b/discovery/consul/metrics.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consul + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*consulMetrics)(nil) + +type consulMetrics struct { + rpcFailuresCount prometheus.Counter + rpcDuration *prometheus.SummaryVec + + servicesRPCDuration prometheus.Observer + serviceRPCDuration prometheus.Observer + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &consulMetrics{ + rpcFailuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_consul_rpc_failures_total", + Help: "The number of Consul RPC call failures.", + }), + rpcDuration: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Namespace: namespace, + Name: "sd_consul_rpc_duration_seconds", + Help: "The duration of a Consul RPC call in seconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"endpoint", "call"}, + ), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.rpcFailuresCount, + m.rpcDuration, + }) + + // Initialize metric vectors. + m.servicesRPCDuration = m.rpcDuration.WithLabelValues("catalog", "services") + m.serviceRPCDuration = m.rpcDuration.WithLabelValues("catalog", "service") + + return m +} + +// Register implements discovery.DiscovererMetrics. +func (m *consulMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *consulMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index e207388b3d..ecee60cb1f 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -24,6 +24,7 @@ import ( "github.com/digitalocean/godo" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -62,6 +63,13 @@ func init() { discovery.RegisterConfig(&SDConfig{}) } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &digitaloceanMetrics{ + refreshMetrics: rmi, + } +} + // SDConfig is the configuration for DigitalOcean based service discovery. type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` @@ -75,7 +83,7 @@ func (*SDConfig) Name() string { return "digitalocean" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. 
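
// Note (not part of the patch): the full caller-side lifecycle, exactly as
// the updated digitalocean test further below exercises it. Refresh metrics
// live on the shared registry, while each SD instance registers and
// unregisters its own metrics:
//
//	reg := prometheus.NewRegistry()
//	refreshMetrics := discovery.NewRefreshMetrics(reg)
//	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
//	require.NoError(t, metrics.Register())
//	defer metrics.Unregister()
//	defer refreshMetrics.Unregister()
//	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)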
@@ -103,7 +111,12 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*digitaloceanMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + d := &Discovery{ port: conf.Port, } @@ -125,10 +138,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "digitalocean", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "digitalocean", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -161,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { } labels := model.LabelSet{ - doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)), + doLabelID: model.LabelValue(strconv.Itoa(droplet.ID)), doLabelName: model.LabelValue(droplet.Name), doLabelImage: model.LabelValue(droplet.Image.Slug), doLabelImageName: model.LabelValue(droplet.Image.Name), diff --git a/discovery/digitalocean/digitalocean_test.go b/discovery/digitalocean/digitalocean_test.go index a5da4b26e3..841b5ef977 100644 --- a/discovery/digitalocean/digitalocean_test.go +++ b/discovery/digitalocean/digitalocean_test.go @@ -20,8 +20,11 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery" ) type DigitalOceanSDTestSuite struct { @@ -46,7 +49,15 @@ func TestDigitalOceanSDRefresh(t *testing.T) { cfg := DefaultSDConfig cfg.HTTPClientConfig.BearerToken = tokenID - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) @@ -56,12 +67,12 @@ func TestDigitalOceanSDRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/digitalocean/metrics.go b/discovery/digitalocean/metrics.go new file mode 100644 index 0000000000..91cfd9bf30 --- /dev/null +++ b/discovery/digitalocean/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package digitalocean + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*digitaloceanMetrics)(nil) + +type digitaloceanMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *digitaloceanMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *digitaloceanMetrics) Unregister() {} diff --git a/discovery/digitalocean/mock_test.go b/discovery/digitalocean/mock_test.go index 2f19b5e1a1..62d963c3b3 100644 --- a/discovery/digitalocean/mock_test.go +++ b/discovery/digitalocean/mock_test.go @@ -21,7 +21,7 @@ import ( "testing" ) -// SDMock is the interface for the DigitalOcean mock +// SDMock is the interface for the DigitalOcean mock. type SDMock struct { t *testing.T Server *httptest.Server @@ -35,18 +35,18 @@ func NewSDMock(t *testing.T) *SDMock { } } -// Endpoint returns the URI to the mock server +// Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } -// Setup creates the mock server +// Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) } -// ShutdownServer creates the mock server +// ShutdownServer creates the mock server. func (m *SDMock) ShutdownServer() { m.Server.Close() } diff --git a/discovery/discoverer_metrics_noop.go b/discovery/discoverer_metrics_noop.go new file mode 100644 index 0000000000..4321204b6c --- /dev/null +++ b/discovery/discoverer_metrics_noop.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +// NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics. +type NoopDiscovererMetrics struct{} + +var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil) + +// Register implements discovery.DiscovererMetrics. +func (*NoopDiscovererMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (*NoopDiscovererMetrics) Unregister() { +} diff --git a/discovery/discovery.go b/discovery/discovery.go index 9dc010a09a..9a83df409b 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -18,6 +18,7 @@ import ( "reflect" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -38,15 +39,48 @@ type Discoverer interface { Run(ctx context.Context, up chan<- []*targetgroup.Group) } +// DiscovererMetrics are internal metrics of service discovery mechanisms. +type DiscovererMetrics interface { + Register() error + Unregister() +} + // DiscovererOptions provides options for a Discoverer. 
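
// Note (not part of the patch): the `var _ discovery.DiscovererMetrics =
// (*fooMetrics)(nil)` lines that accompany each new metrics type are
// compile-time assertions: the build fails if the type ever stops satisfying
// the interface, at zero runtime cost. The same idiom with standard-library
// types:
//
//	var _ io.Writer = (*bytes.Buffer)(nil)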
type DiscovererOptions struct { Logger log.Logger + Metrics DiscovererMetrics + // Extra HTTP client options to expose to Discoverers. This field may be // ignored; Discoverer implementations must opt-in to reading it. HTTPClientOptions []config.HTTPClientOption } +// RefreshMetrics are used by the "refresh" package. +// We define them here in the "discovery" package in order to avoid a cyclic dependency between +// "discovery" and "refresh". +type RefreshMetrics struct { + Failures prometheus.Counter + Duration prometheus.Observer +} + +// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package. +type RefreshMetricsInstantiator interface { + Instantiate(mech string) *RefreshMetrics +} + +// RefreshMetricsManager is an interface for registering, unregistering, and +// instantiating metrics for the "refresh" package. Refresh metrics are +// registered and unregistered outside of the service discovery mechanism. This +// is so that the same metrics can be reused across different service discovery +// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to +// use const labels which are specific to that SD. However, doing so would also +// expose too many unused metrics on the Prometheus /metrics endpoint. +type RefreshMetricsManager interface { + DiscovererMetrics + RefreshMetricsInstantiator +} + // A Config provides the configuration and constructor for a Discoverer. type Config interface { // Name returns the name of the discovery mechanism. @@ -55,6 +89,9 @@ type Config interface { // NewDiscoverer returns a Discoverer for the Config // with the given DiscovererOptions. NewDiscoverer(DiscovererOptions) (Discoverer, error) + + // NewDiscovererMetrics returns the metrics used by the service discovery. + NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics } // Configs is a slice of Config values that uses custom YAML marshaling and unmarshaling @@ -72,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -87,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -109,6 +146,12 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return staticDiscoverer(c), nil } +// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are +// needed for this service discovery mechanism. +func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { + return &NoopDiscovererMetrics{} +} + type staticDiscoverer []*targetgroup.Group func (c staticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) { diff --git a/discovery/discovery_test.go b/discovery/discovery_test.go new file mode 100644 index 0000000000..af327195f2 --- /dev/null +++ b/discovery/discovery_test.go @@ -0,0 +1,36 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestConfigsCustomUnMarshalMarshal(t *testing.T) { + input := `static_configs: +- targets: + - foo:1234 + - bar:4321 +` + cfg := &Configs{} + err := yaml.UnmarshalStrict([]byte(input), cfg) + require.NoError(t, err) + + output, err := yaml.Marshal(cfg) + require.NoError(t, err) + require.Equal(t, input, string(output)) +} diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 96e07254f0..314c3d38cd 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net" + "strconv" "strings" "sync" "time" @@ -42,35 +43,21 @@ const ( dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port" dnsMxRecordPrefix = model.MetaLabelPrefix + "dns_mx_record_" dnsMxRecordTargetLabel = dnsMxRecordPrefix + "target" + dnsNsRecordPrefix = model.MetaLabelPrefix + "dns_ns_record_" + dnsNsRecordTargetLabel = dnsNsRecordPrefix + "target" // Constants for instrumentation. namespace = "prometheus" ) -var ( - dnsSDLookupsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_dns_lookups_total", - Help: "The number of DNS-SD lookups.", - }) - dnsSDLookupFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_dns_lookup_failures_total", - Help: "The number of DNS-SD lookup failures.", - }) - - // DefaultSDConfig is the default DNS SD configuration. - DefaultSDConfig = SDConfig{ - RefreshInterval: model.Duration(30 * time.Second), - Type: "SRV", - } -) +// DefaultSDConfig is the default DNS SD configuration. +var DefaultSDConfig = SDConfig{ + RefreshInterval: model.Duration(30 * time.Second), + Type: "SRV", +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(dnsSDLookupFailuresCount, dnsSDLookupsCount) } // SDConfig is the configuration for DNS based service discovery. @@ -81,12 +68,17 @@ type SDConfig struct { Port int `yaml:"port"` // Ignored for SRV records } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "dns" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(*c, opts.Logger), nil + return NewDiscovery(*c, opts.Logger, opts.Metrics) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -102,7 +94,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } switch strings.ToUpper(c.Type) { case "SRV": - case "A", "AAAA", "MX": + case "A", "AAAA", "MX", "NS": if c.Port == 0 { return errors.New("a port is required in DNS-SD configs for all record types except SRV") } @@ -116,16 +108,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // the Discoverer interface. 
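
// Note (not part of the patch) on the NS support added earlier in this file:
// with the validation switch above now accepting "NS", a hypothetical DNS SD
// section could read as follows (port stays mandatory for every record type
// except SRV, as that switch enforces):
//
//	dns_sd_configs:
//	  - names: ["example.com"]
//	    type: NS
//	    port: 9100
//
// Each resulting target then carries the new __meta_dns_ns_record_target label.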
type Discovery struct { *refresh.Discovery - names []string - port int - qtype uint16 - logger log.Logger + names []string + port int + qtype uint16 + logger log.Logger + metrics *dnsMetrics lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*dnsMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } @@ -140,6 +138,8 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { qtype = dns.TypeSRV case "MX": qtype = dns.TypeMX + case "NS": + qtype = dns.TypeNS } d := &Discovery{ names: conf.Names, @@ -147,14 +147,20 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { port: conf.Port, logger: logger, lookupFn: lookupWithSearchPath, + metrics: m, } + d.Discovery = refresh.NewDiscovery( - logger, - "dns", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "dns", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) - return d + + return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { @@ -187,24 +193,24 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error { response, err := d.lookupFn(name, d.qtype, d.logger) - dnsSDLookupsCount.Inc() + d.metrics.dnsSDLookupsCount.Inc() if err != nil { - dnsSDLookupFailuresCount.Inc() + d.metrics.dnsSDLookupFailuresCount.Inc() return err } tg := &targetgroup.Group{} hostPort := func(a string, p int) model.LabelValue { - return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p))) + return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p))) } for _, record := range response.Answer { - var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget model.LabelValue + var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget, dnsNsRecordTarget model.LabelValue switch addr := record.(type) { case *dns.SRV: dnsSrvRecordTarget = model.LabelValue(addr.Target) - dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port)) + dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port))) // Remove the final dot from rooted DNS names to make them look more usual. addr.Target = strings.TrimRight(addr.Target, ".") @@ -217,6 +223,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ addr.Mx = strings.TrimRight(addr.Mx, ".") target = hostPort(addr.Mx, d.port) + case *dns.NS: + dnsNsRecordTarget = model.LabelValue(addr.Ns) + + // Remove the final dot from rooted DNS names to make them look more usual. 
+ addr.Ns = strings.TrimRight(addr.Ns, ".") + + target = hostPort(addr.Ns, d.port) case *dns.A: target = hostPort(addr.A.String(), d.port) case *dns.AAAA: @@ -234,6 +247,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ dnsSrvRecordTargetLabel: dnsSrvRecordTarget, dnsSrvRecordPortLabel: dnsSrvRecordPort, dnsMxRecordTargetLabel: dnsMxRecordTarget, + dnsNsRecordTargetLabel: dnsNsRecordTarget, }) } diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index 50b2860496..33a976827d 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -22,11 +22,13 @@ import ( "github.com/go-kit/log" "github.com/miekg/dns" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -81,6 +83,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -112,6 +115,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -143,6 +147,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "db1.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, { "__address__": "db2.example.com:3306", @@ -150,6 +155,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "db2.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -180,6 +186,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "db1.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -227,6 +234,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "smtp1.example.com.", + "__meta_dns_ns_record_target": "", }, { "__address__": "smtp2.example.com:25", @@ -234,6 +242,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "smtp2.example.com.", + "__meta_dns_ns_record_target": "", }, }, }, @@ -245,12 +254,21 @@ func TestDNS(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - sd := NewDiscovery(tc.config, nil) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := tc.config.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + sd, err := NewDiscovery(tc.config, nil, metrics) + require.NoError(t, err) sd.lookupFn = tc.lookup tgs, err := sd.refresh(context.Background()) require.NoError(t, err) require.Equal(t, tc.expected, tgs) + + metrics.Unregister() }) } } diff --git a/discovery/dns/metrics.go b/discovery/dns/metrics.go new file mode 100644 index 0000000000..27c96b53e0 --- /dev/null +++ b/discovery/dns/metrics.go @@ -0,0 +1,66 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dns + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*dnsMetrics)(nil) + +type dnsMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator + + dnsSDLookupsCount prometheus.Counter + dnsSDLookupFailuresCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &dnsMetrics{ + refreshMetrics: rmi, + dnsSDLookupsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookups_total", + Help: "The number of DNS-SD lookups.", + }), + dnsSDLookupFailuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookup_failures_total", + Help: "The number of DNS-SD lookup failures.", + }), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.dnsSDLookupsCount, + m.dnsSDLookupFailuresCount, + }) + + return m +} + +// Register implements discovery.DiscovererMetrics. +func (m *dnsMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *dnsMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go index a833415a53..52e8ce7b48 100644 --- a/discovery/eureka/client.go +++ b/discovery/eureka/client.go @@ -81,7 +81,7 @@ const appListPath string = "/apps" func fetchApps(ctx context.Context, server string, client *http.Client) (*Applications, error) { url := fmt.Sprintf("%s%s", server, appListPath) - request, err := http.NewRequest("GET", url, nil) + request, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } diff --git a/discovery/eureka/client_test.go b/discovery/eureka/client_test.go index f1451c3a9e..83f6fd5ff1 100644 --- a/discovery/eureka/client_test.go +++ b/discovery/eureka/client_test.go @@ -184,17 +184,17 @@ func TestFetchApps(t *testing.T) { apps, err := fetchApps(context.TODO(), ts.URL, &http.Client{}) require.NoError(t, err) - require.Equal(t, len(apps.Applications), 2) - require.Equal(t, apps.Applications[0].Name, "CONFIG-SERVICE") - require.Equal(t, apps.Applications[1].Name, "META-SERVICE") + require.Len(t, apps.Applications, 2) + require.Equal(t, "CONFIG-SERVICE", apps.Applications[0].Name) + require.Equal(t, "META-SERVICE", apps.Applications[1].Name) - require.Equal(t, len(apps.Applications[1].Instances), 2) - require.Equal(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, "management.port") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, 
"8090") - require.Equal(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080") + require.Len(t, apps.Applications[1].Instances, 2) + require.Equal(t, "meta-service002.test.com:meta-service:8080", apps.Applications[1].Instances[0].InstanceID) + require.Equal(t, "project", apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local) + require.Equal(t, "meta-service", apps.Applications[1].Instances[0].Metadata.Items[0].Content) + require.Equal(t, "management.port", apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local) + require.Equal(t, "8090", apps.Applications[1].Instances[0].Metadata.Items[1].Content) + require.Equal(t, "meta-service001.test.com:meta-service:8080", apps.Applications[1].Instances[1].InstanceID) } func Test500ErrorHttpResponse(t *testing.T) { diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 5d9d8d552d..779c081aee 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -16,6 +16,7 @@ package eureka import ( "context" "errors" + "fmt" "net" "net/http" "net/url" @@ -23,6 +24,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -75,12 +77,19 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &eurekaMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "eureka" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -117,7 +126,12 @@ type Discovery struct { } // NewDiscovery creates a new Eureka discovery for the given role. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*eurekaMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd") if err != nil { return nil, err @@ -128,10 +142,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { server: conf.Server, } d.Discovery = refresh.NewDiscovery( - logger, - "eureka", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "eureka", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -211,7 +228,6 @@ func targetsForApp(app *Application) []model.LabelSet { } targets = append(targets, target) - } return targets } diff --git a/discovery/eureka/eureka_test.go b/discovery/eureka/eureka_test.go index 0641aa7bf2..b499410bfc 100644 --- a/discovery/eureka/eureka_test.go +++ b/discovery/eureka/eureka_test.go @@ -20,9 +20,11 @@ import ( "net/http/httptest" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -35,7 +37,17 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err Server: ts.URL, } - md, err := NewDiscovery(&conf, nil) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + err := metrics.Register() + if err != nil { + return nil, err + } + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + md, err := NewDiscovery(&conf, nil, metrics) if err != nil { return nil, err } @@ -55,7 +67,7 @@ func TestEurekaSDHandleError(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestEurekaSDEmptyList(t *testing.T) { @@ -72,7 +84,7 @@ func TestEurekaSDEmptyList(t *testing.T) { ) tgs, err := testUpdateServices(respHandler) require.NoError(t, err) - require.Equal(t, len(tgs), 1) + require.Len(t, tgs, 1) } func TestEurekaSDSendGroup(t *testing.T) { @@ -232,11 +244,11 @@ func TestEurekaSDSendGroup(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.NoError(t, err) - require.Equal(t, len(tgs), 1) + require.Len(t, tgs, 1) tg := tgs[0] - require.Equal(t, tg.Source, "eureka") - require.Equal(t, len(tg.Targets), 4) + require.Equal(t, "eureka", tg.Source) + require.Len(t, tg.Targets, 4) tgt := tg.Targets[0] require.Equal(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080")) diff --git a/discovery/eureka/metrics.go b/discovery/eureka/metrics.go new file mode 100644 index 0000000000..6d6463ccd3 --- /dev/null +++ b/discovery/eureka/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package eureka + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*eurekaMetrics)(nil) + +type eurekaMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *eurekaMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *eurekaMetrics) Unregister() {} diff --git a/discovery/file/file.go b/discovery/file/file.go index 60b63350f5..e7e9d0870f 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -39,24 +39,6 @@ import ( ) var ( - fileSDReadErrorsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_file_read_errors_total", - Help: "The number of File-SD read errors.", - }) - fileSDScanDuration = prometheus.NewSummary( - prometheus.SummaryOpts{ - Name: "prometheus_sd_file_scan_duration_seconds", - Help: "The duration of the File-SD scan in seconds.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }) - fileSDTimeStamp = NewTimestampCollector() - fileWatcherErrorsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_file_watcher_errors_total", - Help: "The number of File-SD errors caused by filesystem watch failures.", - }) - patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) // DefaultSDConfig is the default file SD configuration. @@ -67,7 +49,6 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount) } // SDConfig is the configuration for file based discovery. @@ -76,12 +57,17 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "file" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger), nil + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -113,6 +99,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { const fileSDFilepathLabel = model.MetaLabelPrefix + "filepath" // TimestampCollector is a Custom Collector for Timestamps of the files. +// TODO(ptodev): Now that each file SD has its own TimestampCollector +// inside discovery/file/metrics.go, we can refactor this collector +// (or get rid of it) as each TimestampCollector instance will only use one discoverer. type TimestampCollector struct { Description *prometheus.Desc discoverers map[*Discovery]struct{} @@ -187,10 +176,17 @@ type Discovery struct { // This is used to detect deleted target groups. 
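Aside on the TODO above: TimestampCollector is a hand-written prometheus.Collector, and now that metrics are per-SD each instance only ever watches one Discovery, which is what makes the simplification possible. For orientation, the general shape such a collector takes (a simplified sketch under assumed names, not the actual implementation; imports sync and client_golang's prometheus package):

type mtimeCollector struct {
	desc  *prometheus.Desc
	mu    sync.Mutex
	mtime map[string]float64 // file path -> timestamp in seconds
}

func newMtimeCollector() *mtimeCollector {
	return &mtimeCollector{
		desc: prometheus.NewDesc(
			"example_sd_file_mtime_seconds",
			"Timestamp of files read by file SD.",
			[]string{"filename"}, nil,
		),
		mtime: map[string]float64{},
	}
}

// Describe and Collect are the two methods that make this a
// prometheus.Collector; Collect emits one const metric per tracked file.
func (c *mtimeCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *mtimeCollector) Collect(ch chan<- prometheus.Metric) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for fn, ts := range c.mtime {
		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, ts, fn)
	}
}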
lastRefresh map[string]int logger log.Logger + + metrics *fileMetrics } // NewDiscovery returns a new file discovery for the given paths. -func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + fm, ok := metrics.(*fileMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } @@ -200,9 +196,12 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { interval: time.Duration(conf.RefreshInterval), timestamps: make(map[string]float64), logger: logger, + metrics: fm, } - fileSDTimeStamp.addDiscoverer(disc) - return disc + + fm.init(disc) + + return disc, nil } // listFiles returns a list of all files that match the configured patterns. @@ -242,7 +241,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) - fileWatcherErrorsCount.Inc() + d.metrics.fileWatcherErrorsCount.Inc() return } d.watcher = watcher @@ -306,7 +305,7 @@ func (d *Discovery) stop() { done := make(chan struct{}) defer close(done) - fileSDTimeStamp.removeDiscoverer(d) + d.metrics.fileSDTimeStamp.removeDiscoverer(d) // Closing the watcher will deadlock unless all events and errors are drained. go func() { @@ -332,13 +331,13 @@ func (d *Discovery) stop() { func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) { t0 := time.Now() defer func() { - fileSDScanDuration.Observe(time.Since(t0).Seconds()) + d.metrics.fileSDScanDuration.Observe(time.Since(t0).Seconds()) }() ref := map[string]int{} for _, p := range d.listFiles() { tgroups, err := d.readFile(p) if err != nil { - fileSDReadErrorsCount.Inc() + d.metrics.fileSDReadErrorsCount.Inc() level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) // Prevent deletion down below. diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go index 76e1cebed9..179ac5cd1c 100644 --- a/discovery/file/file_test.go +++ b/discovery/file/file_test.go @@ -24,10 +24,12 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -143,15 +145,28 @@ func (t *testRunner) run(files ...string) { ctx, cancel := context.WithCancel(context.Background()) t.cancelSD = cancel go func() { - NewDiscovery( - &SDConfig{ - Files: files, - // Setting a high refresh interval to make sure that the tests only - // rely on file watches. - RefreshInterval: model.Duration(1 * time.Hour), - }, + conf := &SDConfig{ + Files: files, + // Setting a high refresh interval to make sure that the tests only + // rely on file watches. 
+ RefreshInterval: model.Duration(1 * time.Hour), + } + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + d, err := NewDiscovery( + conf, nil, - ).Run(ctx, t.ch) + metrics, + ) + require.NoError(t, err) + + d.Run(ctx, t.ch) + + metrics.Unregister() }() } @@ -188,11 +203,11 @@ func (t *testRunner) targets() []*targetgroup.Group { func (t *testRunner) requireUpdate(ref time.Time, expected []*targetgroup.Group) { t.Helper() + timeout := time.After(defaultWait) for { select { - case <-time.After(defaultWait): + case <-timeout: t.Fatalf("Expected update but got none") - return case <-time.After(defaultWait / 10): if ref.Equal(t.lastReceive()) { // No update received. @@ -342,9 +357,7 @@ func TestInvalidFile(t *testing.T) { // Verify that we've received nothing. time.Sleep(defaultWait) - if runner.lastReceive().After(now) { - t.Fatalf("unexpected targets received: %v", runner.targets()) - } + require.False(t, runner.lastReceive().After(now), "unexpected targets received: %v", runner.targets()) }) } } diff --git a/discovery/file/metrics.go b/discovery/file/metrics.go new file mode 100644 index 0000000000..c01501e4ef --- /dev/null +++ b/discovery/file/metrics.go @@ -0,0 +1,76 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package file + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*fileMetrics)(nil) + +type fileMetrics struct { + fileSDReadErrorsCount prometheus.Counter + fileSDScanDuration prometheus.Summary + fileWatcherErrorsCount prometheus.Counter + fileSDTimeStamp *TimestampCollector + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + fm := &fileMetrics{ + fileSDReadErrorsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }), + fileSDScanDuration: prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_file_scan_duration_seconds", + Help: "The duration of the File-SD scan in seconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }), + fileWatcherErrorsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_watcher_errors_total", + Help: "The number of File-SD errors caused by filesystem watch failures.", + }), + fileSDTimeStamp: NewTimestampCollector(), + } + + fm.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + fm.fileSDReadErrorsCount, + fm.fileSDScanDuration, + fm.fileWatcherErrorsCount, + fm.fileSDTimeStamp, + }) + + return fm +} + +// Register implements discovery.DiscovererMetrics. 
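Reviewer note on the requireUpdate fix above: hoisting the timeout matters. time.After inside a select inside a loop creates a fresh timer on every iteration, so the outer deadline could never fire while the shorter polling case kept winning. One channel created before the loop keeps ticking down across iterations. In miniature (fragments of a function returning error):

// Buggy: the 5s deadline resets every pass, and the 100ms case always
// wins, so this can spin forever.
for {
	select {
	case <-time.After(5 * time.Second): // new timer each iteration
		return errors.New("timed out")
	case <-time.After(100 * time.Millisecond):
		// poll...
	}
}

// Fixed: one deadline for the whole wait.
deadline := time.After(5 * time.Second)
for {
	select {
	case <-deadline:
		return errors.New("timed out")
	case <-time.After(100 * time.Millisecond):
		// poll...
	}
}

(The removed `return` after t.Fatalf was dead code as well: Fatalf already stops the test goroutine.)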
+func (fm *fileMetrics) Register() error { + return fm.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (fm *fileMetrics) Unregister() { + fm.metricRegisterer.UnregisterMetrics() +} + +func (fm *fileMetrics) init(disc *Discovery) { + fm.fileSDTimeStamp.addDiscoverer(disc) +} diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index fa05fbbf38..15f32dd247 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "golang.org/x/oauth2/google" "google.golang.org/api/compute/v1" @@ -81,12 +82,19 @@ type SDConfig struct { TagSeparator string `yaml:"tag_separator,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &gceMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "gce" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(*c, opts.Logger) + return NewDiscovery(*c, opts.Logger, opts.Metrics) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -121,7 +129,12 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*gceMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + d := &Discovery{ project: conf.Project, zone: conf.Zone, @@ -141,10 +154,13 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { d.isvc = compute.NewInstancesService(d.svc) d.Discovery = refresh.NewDiscovery( - logger, - "gce", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "gce", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } diff --git a/discovery/gce/metrics.go b/discovery/gce/metrics.go new file mode 100644 index 0000000000..f53ea5a8c6 --- /dev/null +++ b/discovery/gce/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gce + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*gceMetrics)(nil) + +type gceMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *gceMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. 
+func (m *gceMetrics) Unregister() {} diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index 6d0599dfa2..df56f94c5f 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -15,7 +15,6 @@ package hetzner import ( "context" - "fmt" "net" "net/http" "strconv" @@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er for i, server := range servers { labels := model.LabelSet{ hetznerLabelRole: model.LabelValue(HetznerRoleHcloud), - hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)), + hetznerLabelServerID: model.LabelValue(strconv.FormatInt(server.ID, 10)), hetznerLabelServerName: model.LabelValue(server.Name), hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name), hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()), @@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name), hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone), hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name), - hetznerLabelHcloudCPUCores: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)), + hetznerLabelHcloudCPUCores: model.LabelValue(strconv.Itoa(server.ServerType.Cores)), hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType), - hetznerLabelHcloudMemoryGB: model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))), - hetznerLabelHcloudDiskGB: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)), + hetznerLabelHcloudMemoryGB: model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))), + hetznerLabelHcloudDiskGB: model.LabelValue(strconv.Itoa(server.ServerType.Disk)), model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))), } diff --git a/discovery/hetzner/hcloud_test.go b/discovery/hetzner/hcloud_test.go index a4f19cfddd..10b799037a 100644 --- a/discovery/hetzner/hcloud_test.go +++ b/discovery/hetzner/hcloud_test.go @@ -48,12 +48,12 @@ func TestHCloudSDRefresh(t *testing.T) { targetGroups, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup, "targetGroup should not be nil") require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil") - require.Equal(t, 3, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 3) for i, labelSet := range []model.LabelSet{ { diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index c3f7ec39c3..69c823d382 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -62,12 +63,19 @@ type SDConfig struct { robotEndpoint string // For tests only. } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &hetznerMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. 
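Aside on the fmt.Sprintf → strconv swaps in hcloud.go above (and in robot.go below): for plain integers and booleans the strconv forms skip format-string parsing and the interface{} boxing, and they state the conversion explicitly. The equivalences used here (variable names illustrative):

_ = fmt.Sprintf("%d", server.ID)     // before
_ = strconv.FormatInt(server.ID, 10) // after: server.ID is an int64

_ = fmt.Sprintf("%d", cores) // before
_ = strconv.Itoa(cores)      // after: cores is an int

_ = fmt.Sprintf("%t", canceled)  // before
_ = strconv.FormatBool(canceled) // after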
func (*SDConfig) Name() string { return "hetzner" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } type refresher interface { @@ -127,17 +135,25 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { + m, ok := metrics.(*hetznerMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + r, err := newRefresher(conf, logger) if err != nil { return nil, err } return refresh.NewDiscovery( - logger, - "hetzner", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: logger, + Mech: "hetzner", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ), nil } diff --git a/discovery/hetzner/metrics.go b/discovery/hetzner/metrics.go new file mode 100644 index 0000000000..27361ee755 --- /dev/null +++ b/discovery/hetzner/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hetzner + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*hetznerMetrics)(nil) + +type hetznerMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *hetznerMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *hetznerMetrics) Unregister() {} diff --git a/discovery/hetzner/mock_test.go b/discovery/hetzner/mock_test.go index ecf3132742..d192a4eae9 100644 --- a/discovery/hetzner/mock_test.go +++ b/discovery/hetzner/mock_test.go @@ -20,7 +20,7 @@ import ( "testing" ) -// SDMock is the interface for the Hetzner Cloud mock +// SDMock is the interface for the Hetzner Cloud mock. type SDMock struct { t *testing.T Server *httptest.Server @@ -34,19 +34,19 @@ func NewSDMock(t *testing.T) *SDMock { } } -// Endpoint returns the URI to the mock server +// Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } -// Setup creates the mock server +// Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) m.t.Cleanup(m.Server.Close) } -// ShutdownServer creates the mock server +// ShutdownServer creates the mock server. 
func (m *SDMock) ShutdownServer() { m.Server.Close() } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 1d8aa9302f..516470b05a 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -70,7 +70,7 @@ func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { } func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { - req, err := http.NewRequest("GET", d.endpoint+"/server", nil) + req, err := http.NewRequest(http.MethodGet, d.endpoint+"/server", nil) if err != nil { return nil, err } @@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) hetznerLabelPublicIPv4: model.LabelValue(server.Server.ServerIP), hetznerLabelServerStatus: model.LabelValue(server.Server.Status), hetznerLabelRobotProduct: model.LabelValue(server.Server.Product), - hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)), + hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)), model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))), } @@ -122,7 +122,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) labels[hetznerLabelPublicIPv6Network] = model.LabelValue(fmt.Sprintf("%s/%s", subnet.IP, subnet.Mask)) break } - } targets[i] = labels } diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index f78a0bbda1..abee5fea90 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -47,12 +47,12 @@ func TestRobotSDRefresh(t *testing.T) { targetGroups, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup, "targetGroup should not be nil") require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil") - require.Equal(t, 2, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 2) for i, labelSet := range []model.LabelSet{ { @@ -98,5 +98,5 @@ func TestRobotSDRefreshHandleError(t *testing.T) { require.Error(t, err) require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error()) - require.Equal(t, 0, len(targetGroups)) + require.Empty(t, targetGroups) } diff --git a/discovery/http/http.go b/discovery/http/http.go index ec958c6148..ff76fd7627 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -45,17 +45,10 @@ var ( } userAgent = fmt.Sprintf("Prometheus/%s", version.Version) matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_http_failures_total", - Help: "Number of HTTP service discovery refresh failures.", - }) ) func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) } // SDConfig is the configuration for HTTP based discovery. @@ -65,12 +58,17 @@ type SDConfig struct { URL string `yaml:"url"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "http" } // NewDiscoverer returns a Discoverer for the Config. 
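Reviewer note on the assertion cleanups repeated throughout (eureka's client_test.go and eureka_test.go, hcloud_test.go, robot_test.go, and ionos's server_test.go below): require.Equal takes the expected value before the actual one, so the swapped calls previously produced inverted failure messages; require.Len and require.Empty also print the collection itself on failure instead of two bare numbers. The pattern, in miniature (names illustrative):

// Before: actual first, and a hand-rolled length check.
require.Equal(t, len(tgs), 1)
require.Equal(t, tg.Source, "eureka")

// After: expected first, plus assertions that report collection contents.
require.Len(t, tgs, 1)
require.Equal(t, "eureka", tg.Source)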
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions) + return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -99,7 +97,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if parsedURL.Host == "" { return fmt.Errorf("host is missing in URL") } - return nil + return c.HTTPClientConfig.Validate() } const httpSDURLLabel = model.MetaLabelPrefix + "url" @@ -112,10 +110,16 @@ type Discovery struct { client *http.Client refreshInterval time.Duration tgLastLength int + metrics *httpMetrics } // NewDiscovery returns a new HTTP discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*httpMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } @@ -130,19 +134,23 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPCli url: conf.URL, client: client, refreshInterval: time.Duration(conf.RefreshInterval), // Stored to be sent as headers. + metrics: m, } d.Discovery = refresh.NewDiscovery( - logger, - "http", - time.Duration(conf.RefreshInterval), - d.Refresh, + refresh.Options{ + Logger: logger, + Mech: "http", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.Refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) { - req, err := http.NewRequest("GET", d.url, nil) + req, err := http.NewRequest(http.MethodGet, d.url, nil) if err != nil { return nil, err } @@ -152,7 +160,7 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) { resp, err := d.client.Do(req.WithContext(ctx)) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, err } defer func() { @@ -161,31 +169,31 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) { }() if resp.StatusCode != http.StatusOK { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("server returned HTTP status %s", resp.Status) } if !matchContentType.MatchString(strings.TrimSpace(resp.Header.Get("Content-Type"))) { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("unsupported content type %q", resp.Header.Get("Content-Type")) } b, err := io.ReadAll(resp.Body) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, err } var targetGroups []*targetgroup.Group if err := json.Unmarshal(b, &targetGroups); err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, err } for i, tg := range targetGroups { if tg == nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() err = errors.New("nil target group item found") return nil, err } diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index a284e7f361..0cafe035dc 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -28,6 +28,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -41,7 
+42,14 @@ func TestHTTPValidRefresh(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + defer refreshMetrics.Unregister() + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -62,8 +70,8 @@ func TestHTTPValidRefresh(t *testing.T) { Source: urlSource(ts.URL+"/http_sd.good.json", 0), }, } - require.Equal(t, tgs, expectedTargets) - require.Equal(t, 0.0, getFailureCount()) + require.Equal(t, expectedTargets, tgs) + require.Equal(t, 0.0, getFailureCount(d.metrics.failuresCount)) } func TestHTTPInvalidCode(t *testing.T) { @@ -79,13 +87,20 @@ func TestHTTPInvalidCode(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + defer refreshMetrics.Unregister() + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() _, err = d.Refresh(ctx) require.EqualError(t, err, "server returned HTTP status 400 Bad Request") - require.Equal(t, 1.0, getFailureCount()) + require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount)) } func TestHTTPInvalidFormat(t *testing.T) { @@ -101,18 +116,23 @@ func TestHTTPInvalidFormat(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + defer refreshMetrics.Unregister() + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() _, err = d.Refresh(ctx) require.EqualError(t, err, `unsupported content type "text/plain; charset=utf-8"`) - require.Equal(t, 1.0, getFailureCount()) + require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount)) } -var lastFailureCount float64 - -func getFailureCount() float64 { +func getFailureCount(failuresCount prometheus.Counter) float64 { failureChan := make(chan prometheus.Metric) go func() { @@ -129,10 +149,7 @@ func getFailureCount() float64 { metric.Write(&counter) } - // account for failures in prior tests - count := *counter.Counter.Value - lastFailureCount - lastFailureCount = *counter.Counter.Value - return count + return *counter.Counter.Value } func TestContentTypeRegex(t *testing.T) { @@ -417,7 +434,15 @@ func TestSourceDisappeared(t *testing.T) { URL: ts.URL, RefreshInterval: model.Duration(1 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + defer refreshMetrics.Unregister() + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) require.NoError(t, err) for _, test := range cases { ctx := context.Background() diff --git 
a/discovery/http/metrics.go b/discovery/http/metrics.go new file mode 100644 index 0000000000..b1f8b84433 --- /dev/null +++ b/discovery/http/metrics.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package http + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*httpMetrics)(nil) + +type httpMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator + + failuresCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &httpMetrics{ + refreshMetrics: rmi, + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_http_failures_total", + Help: "Number of HTTP service discovery refresh failures.", + }), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.failuresCount, + }) + + return m +} + +// Register implements discovery.DiscovererMetrics. +func (m *httpMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *httpMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index a13a000585..c8b4f7f8e5 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -14,10 +14,12 @@ package ionos import ( + "errors" + "fmt" "time" "github.com/go-kit/log" - "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -41,7 +43,12 @@ func init() { type Discovery struct{} // NewDiscovery returns a new refresh.Discovery for IONOS Cloud. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { + m, ok := metrics.(*ionosMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if conf.ionosEndpoint == "" { conf.ionosEndpoint = "https://api.ionos.com" } @@ -52,10 +59,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) } return refresh.NewDiscovery( - logger, - "ionos", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "ionos", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ), nil } @@ -79,6 +89,13 @@ type SDConfig struct { ionosEndpoint string // For tests only. } +// NewDiscovererMetrics implements discovery.Config. 
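Aside on getFailureCount in http_test.go above: it now reads the counter it is handed instead of diffing a package-level running total, which is what made the old tests order-dependent. client_golang also ships a ready-made helper that would collapse the hand-rolled channel plumbing; a sketch of the equivalent (assuming the same d.metrics.failuresCount counter):

import "github.com/prometheus/client_golang/prometheus/testutil"

// testutil.ToFloat64 collects a single-metric Collector and returns its value.
require.Equal(t, 1.0, testutil.ToFloat64(d.metrics.failuresCount))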
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &ionosMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the IONOS Cloud service discovery. func (c SDConfig) Name() string { return "ionos" @@ -86,7 +103,7 @@ func (c SDConfig) Name() string { // NewDiscoverer returns a new discovery.Discoverer for IONOS Cloud. func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(&c, options.Logger) + return NewDiscovery(&c, options.Logger, options.Metrics) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/discovery/ionos/metrics.go b/discovery/ionos/metrics.go new file mode 100644 index 0000000000..88e465acf3 --- /dev/null +++ b/discovery/ionos/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ionos + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*ionosMetrics)(nil) + +type ionosMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *ionosMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *ionosMetrics) Unregister() {} diff --git a/discovery/ionos/server_test.go b/discovery/ionos/server_test.go index 92f2a96f9d..30f358e325 100644 --- a/discovery/ionos/server_test.go +++ b/discovery/ionos/server_test.go @@ -48,12 +48,12 @@ func TestIONOSServerRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 7200d52dde..542bc95edc 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many legitimately empty blocks in this file. package kubernetes import ( @@ -23,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" @@ -31,12 +31,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - epAddCount = eventCount.WithLabelValues("endpoints", "add") - epUpdateCount = eventCount.WithLabelValues("endpoints", "update") - epDeleteCount = eventCount.WithLabelValues("endpoints", "delete") -) - // Endpoints discovers new endpoint targets. 
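Reviewer note on ionos dropping github.com/pkg/errors: since Go 1.13 the standard library covers the same ground, with fmt.Errorf("%w") for wrapping and errors.Is / errors.As for unwrapping. Roughly (the message and sentinel here are illustrative, not from the patch):

// github.com/pkg/errors style:
//   return errors.Wrap(err, "listing servers")
// stdlib equivalent:
if err != nil {
	return fmt.Errorf("listing servers: %w", err)
}
// ...which callers can still unwrap:
if errors.Is(err, context.DeadlineExceeded) { /* ... */ }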
type Endpoints struct { logger log.Logger @@ -55,10 +49,21 @@ type Endpoints struct { } // NewEndpoints returns a new endpoints discovery. -func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *Endpoints { +func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { if l == nil { l = log.NewNopLogger() } + + epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) + epUpdateCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleUpdate) + epDeleteCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleDelete) + + svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) + svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) + svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) + + podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate) + e := &Endpoints{ logger: l, endpointsInf: eps, @@ -69,7 +74,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca podStore: pod.GetStore(), nodeInf: node, withNodeMetadata: node != nil, - queue: workqueue.NewNamed("endpoints"), + queue: workqueue.NewNamed(RoleEndpoint.String()), } _, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -128,6 +133,29 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca if err != nil { level.Error(l).Log("msg", "Error adding services event handler.", "err", err) } + _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(old, cur interface{}) { + podUpdateCount.Inc() + oldPod, ok := old.(*apiv1.Pod) + if !ok { + return + } + + curPod, ok := cur.(*apiv1.Pod) + if !ok { + return + } + + // the Pod's phase may change without triggering an update on the Endpoints/Service. + // https://github.com/prometheus/prometheus/issues/11305. + if curPod.Status.Phase != oldPod.Status.Phase { + e.enqueuePod(namespacedName(curPod.Namespace, curPod.Name)) + } + }, + }) + if err != nil { + level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { @@ -163,6 +191,18 @@ func (e *Endpoints) enqueueNode(nodeName string) { } } +func (e *Endpoints) enqueuePod(podNamespacedName string) { + endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName) + if err != nil { + level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err) + return + } + + for _, endpoint := range endpoints { + e.enqueue(endpoint) + } +} + func (e *Endpoints) enqueue(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { @@ -309,7 +349,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { tg.Targets = append(tg.Targets, target) return } - s := pod.Namespace + "/" + pod.Name + s := namespacedName(pod.Namespace, pod.Name) sp, ok := seenPods[s] if !ok { @@ -321,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
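// Reviewer note: the append above relies on pod.Spec.Containers having no
// spare capacity (true for API-decoded objects); if it ever did, append
// would write the init containers into the shared backing array. A
// defensive variant, should that assumption ever stop holding:
//
//	containers := make([]v1.Container, 0, len(pod.Spec.Containers)+len(pod.Spec.InitContainers))
//	containers = append(containers, pod.Spec.Containers...)
//	containers = append(containers, pod.Spec.InitContainers...)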
+ for i, c := range containers { for _, cport := range c.Ports { if port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -366,7 +409,13 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { // For all seen pods, check all container ports. If they were not covered // by one of the service endpoints, generate targets for them. for _, pe := range seenPods { - for _, c := range pe.pod.Spec.Containers { + // PodIP can be empty when a pod is starting or has been evicted. + if len(pe.pod.Status.PodIP) == 0 { + continue + } + + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -380,21 +429,20 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - // PodIP can be empty when a pod is starting or has been evicted. - if len(pe.pod.Status.PodIP) != 0 { - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), - } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + + isInit := i >= len(pe.pod.Spec.Containers) + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index cf7fd9aee0..4af6889602 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -244,6 +244,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -259,6 +260,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -821,6 +823,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": 
"deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -969,3 +972,288 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { expectedRes: map[string]*targetgroup.Group{}, }.Run(t) } + +// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. +// See https://github.com/prometheus/prometheus/issues/11305 for more details. +func TestEndpointsDiscoveryUpdatePod(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + // Pod is in Pending phase when discovered for first time. + Phase: "Pending", + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionFalse, + }, + }, + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + } + objs := []runtime.Object{ + &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + // The Pending Pod may be included because the Endpoints was created manually. + // Or because the corresponding service has ".spec.publishNotReadyAddresses: true". + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "mainport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + pod, + } + n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + // the Pod becomes Ready. 
+ pod.Status.Phase = "Running" + pod.Status.Conditions = []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + } + c.CoreV1().Pods(pod.Namespace).Update(context.Background(), pod, metav1.UpdateOptions{}) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_port_name": "mainport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_ready": "true", + "__meta_kubernetes_pod_phase": "Running", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + }, + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + { + Name: "initport", + Port: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + InitContainers: []v1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...) 
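+ // Three targets are expected below: the regular container matched via
+ // the endpoint port (container_init "false"), the ic2 sidecar matched
+ // the same way (container_init "true"), and ic1's port 1111, which no
+ // endpoint covers and is therefore generated from the pod spec alone.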
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "initport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpoints_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpoints/default/testsidecar", + }, + }, + }.Run(t) +} diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index e241c758bc..1368303104 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -15,16 +15,17 @@ package kubernetes import ( "context" + "errors" "fmt" "net" "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ 
-32,12 +33,6 @@ import ( "github.com/prometheus/prometheus/util/strutil" ) -var ( - epslAddCount = eventCount.WithLabelValues("endpointslice", "add") - epslUpdateCount = eventCount.WithLabelValues("endpointslice", "update") - epslDeleteCount = eventCount.WithLabelValues("endpointslice", "delete") -) - // EndpointSlice discovers new endpoint targets. type EndpointSlice struct { logger log.Logger @@ -56,10 +51,19 @@ type EndpointSlice struct { } // NewEndpointSlice returns a new endpointslice discovery. -func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *EndpointSlice { +func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { if l == nil { l = log.NewNopLogger() } + + epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) + epslUpdateCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleUpdate) + epslDeleteCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleDelete) + + svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) + svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) + svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) + e := &EndpointSlice{ logger: l, endpointSliceInf: eps, @@ -70,7 +74,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod podStore: pod.GetStore(), nodeInf: node, withNodeMetadata: node != nil, - queue: workqueue.NewNamed("endpointSlice"), + queue: workqueue.NewNamed(RoleEndpointSlice.String()), } _, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -98,9 +102,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod return } - // TODO(brancz): use cache.Indexer to index endpoints by - // disv1beta1.LabelServiceName so this operation doesn't have to - // iterate over all endpoint objects. + // TODO(brancz): use cache.Indexer to index endpointslices by + // LabelServiceName so this operation doesn't have to iterate over all + // endpoint objects. for _, obj := range e.endpointSliceStore.List() { esa, err := e.getEndpointSliceAdaptor(obj) if err != nil { @@ -183,14 +187,14 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) 
{ - if ctx.Err() != context.Canceled { + if !errors.Is(ctx.Err(), context.Canceled) { level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache") } return } go func() { - for e.process(ctx, ch) { // nolint:revive + for e.process(ctx, ch) { } }() @@ -236,8 +240,6 @@ func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAda switch endpointSlice := o.(type) { case *v1.EndpointSlice: return newEndpointSliceAdaptorFromV1(endpointSlice), nil - case *v1beta1.EndpointSlice: - return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil default: return nil, fmt.Errorf("received unexpected object: %v", o) } @@ -260,7 +262,9 @@ const ( endpointSliceEndpointConditionsReadyLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_ready" endpointSliceEndpointConditionsServingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_serving" endpointSliceEndpointConditionsTerminatingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_terminating" + endpointSliceEndpointZoneLabel = metaLabelPrefix + "endpointslice_endpoint_zone" endpointSliceEndpointHostnameLabel = metaLabelPrefix + "endpointslice_endpoint_hostname" + endpointSliceEndpointNodenameLabel = metaLabelPrefix + "endpointslice_endpoint_node_name" endpointSliceAddressTargetKindLabel = metaLabelPrefix + "endpointslice_address_target_kind" endpointSliceAddressTargetNameLabel = metaLabelPrefix + "endpointslice_address_target_name" endpointSliceEndpointTopologyLabelPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_" @@ -333,6 +337,14 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.targetRef().Name) } + if ep.nodename() != nil { + target[endpointSliceEndpointNodenameLabel] = lv(*ep.nodename()) + } + + if ep.zone() != nil { + target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.zone()) + } + for k, v := range ep.topology() { ln := strutil.SanitizeLabelName(k) target[model.LabelName(endpointSliceEndpointTopologyLabelPrefix+ln)] = lv(v) @@ -353,7 +365,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou tg.Targets = append(tg.Targets, target) return } - s := pod.Namespace + "/" + pod.Name + s := namespacedName(pod.Namespace, pod.Name) sp, ok := seenPods[s] if !ok { @@ -365,19 +377,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { if port.port() == nil { continue } + if *port.port() == cport.ContainerPort { ports := strconv.FormatUint(uint64(*port.port()), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(cport.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -400,7 +416,13 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou // For all seen pods, check all container ports. If they were not covered // by one of the service endpoints, generate targets for them. 
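+ // Note: the PodIP guard now sits at the top of this loop, so pods
+ // without an address are skipped once, for regular and init containers
+ // alike, instead of being re-checked inside the innermost port loop.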
for _, pe := range seenPods { - for _, c := range pe.pod.Spec.Containers { + // PodIP can be empty when a pod is starting or has been evicted. + if len(pe.pod.Status.PodIP) == 0 { + continue + } + + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -417,21 +439,20 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - // PodIP can be empty when a pod is starting or has been evicted. - if len(pe.pod.Status.PodIP) != 0 { - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), - } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + + isInit := i >= len(pe.pod.Spec.Containers) + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go index 46fa708c10..81243e2ce0 100644 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ b/discovery/kubernetes/endpointslice_adaptor.go @@ -16,11 +16,10 @@ package kubernetes import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions +// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions. type endpointSliceAdaptor interface { get() interface{} getObjectMeta() metav1.ObjectMeta @@ -44,6 +43,7 @@ type endpointSliceEndpointAdaptor interface { addresses() []string hostname() *string nodename() *string + zone() *string conditions() endpointSliceEndpointConditionsAdaptor targetRef() *corev1.ObjectReference topology() map[string]string @@ -55,7 +55,7 @@ type endpointSliceEndpointConditionsAdaptor interface { terminating() *bool } -// Adaptor for k8s.io/api/discovery/v1 +// Adaptor for k8s.io/api/discovery/v1. 
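+// With discovery.k8s.io/v1beta1 gone (it was removed in Kubernetes 1.25),
+// this is the only concrete implementation left behind the interface.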
type endpointSliceAdaptorV1 struct { endpointSlice *v1.EndpointSlice } @@ -108,59 +108,6 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string { return v1.LabelServiceName } -// Adaptor for k8s.io/api/discovery/v1beta1 -type endpointSliceAdaptorV1Beta1 struct { - endpointSlice *v1beta1.EndpointSlice -} - -func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor { - return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice} -} - -func (e *endpointSliceAdaptorV1Beta1) get() interface{} { - return e.endpointSlice -} - -func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { - return e.endpointSlice.ObjectMeta -} - -func (e *endpointSliceAdaptorV1Beta1) name() string { - return e.endpointSlice.Name -} - -func (e *endpointSliceAdaptorV1Beta1) namespace() string { - return e.endpointSlice.Namespace -} - -func (e *endpointSliceAdaptorV1Beta1) addressType() string { - return string(e.endpointSlice.AddressType) -} - -func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor { - eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints)) - for i := 0; i < len(e.endpointSlice.Endpoints); i++ { - eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i])) - } - return eps -} - -func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor { - ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports)) - for i := 0; i < len(e.endpointSlice.Ports); i++ { - ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i])) - } - return ports -} - -func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string { - return e.endpointSlice.Labels -} - -func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string { - return v1beta1.LabelServiceName -} - type endpointSliceEndpointAdaptorV1 struct { endpoint v1.Endpoint } @@ -181,6 +128,10 @@ func (e *endpointSliceEndpointAdaptorV1) nodename() *string { return e.endpoint.NodeName } +func (e *endpointSliceEndpointAdaptorV1) zone() *string { + return e.endpoint.Zone +} + func (e *endpointSliceEndpointAdaptorV1) conditions() endpointSliceEndpointConditionsAdaptor { return newEndpointSliceEndpointConditionsAdaptorFromV1(e.endpoint.Conditions) } @@ -213,58 +164,6 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { return e.endpointConditions.Terminating } -type endpointSliceEndpointAdaptorV1beta1 struct { - endpoint v1beta1.Endpoint -} - -func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor { - return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint} -} - -func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string { - return e.endpoint.Addresses -} - -func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string { - return e.endpoint.Hostname -} - -func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string { - return e.endpoint.NodeName -} - -func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor { - return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions) -} - -func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference { - return e.endpoint.TargetRef -} - -func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string { - return e.endpoint.Topology -} - -type endpointSliceEndpointConditionsAdaptorV1beta1 struct { - endpointConditions v1beta1.EndpointConditions -} - -func 
newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor { - return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions} -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool { - return e.endpointConditions.Ready -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool { - return e.endpointConditions.Serving -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool { - return e.endpointConditions.Terminating -} - type endpointSlicePortAdaptorV1 struct { endpointPort v1.EndpointPort } @@ -289,28 +188,3 @@ func (e *endpointSlicePortAdaptorV1) protocol() *string { func (e *endpointSlicePortAdaptorV1) appProtocol() *string { return e.endpointPort.AppProtocol } - -type endpointSlicePortAdaptorV1beta1 struct { - endpointPort v1beta1.EndpointPort -} - -func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor { - return &endpointSlicePortAdaptorV1beta1{endpointPort: port} -} - -func (e *endpointSlicePortAdaptorV1beta1) name() *string { - return e.endpointPort.Name -} - -func (e *endpointSlicePortAdaptorV1beta1) port() *int32 { - return e.endpointPort.Port -} - -func (e *endpointSlicePortAdaptorV1beta1) protocol() *string { - val := string(*e.endpointPort.Protocol) - return &val -} - -func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string { - return e.endpointPort.AppProtocol -} diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index e564910936..de33c64b66 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -18,7 +18,6 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" ) func Test_EndpointSliceAdaptor_v1(t *testing.T) { @@ -29,7 +28,7 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) require.Equal(t, endpointSlice.AddressType, v1.AddressType(adaptor.addressType())) require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, endpointSlice.Labels[v1.LabelServiceName], "testendpoints") + require.Equal(t, "testendpoints", endpointSlice.Labels[v1.LabelServiceName]) for i, endpointAdaptor := range adaptor.endpoints() { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) @@ -48,31 +47,3 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol()) } } - -func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) { - endpointSlice := makeEndpointSliceV1beta1() - adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice) - - require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name()) - require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) - require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType())) - require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, endpointSlice.Labels[v1beta1.LabelServiceName], "testendpoints") - - for i, endpointAdaptor := range adaptor.endpoints() { - require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) - require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname()) - require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, 
endpointAdaptor.conditions().ready()) - require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving()) - require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating()) - require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef()) - require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology()) - } - - for i, portAdaptor := range adaptor.ports() { - require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name()) - require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port()) - require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol()) - require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol()) - } -} diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index f23c9e6557..cc92c7ddaa 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -18,9 +18,9 @@ import ( "testing" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -79,6 +79,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice { DeprecatedTopology: map[string]string{ "topology": "value", }, + Zone: strptr("us-east-1a"), }, { Addresses: []string{"2.3.4.5"}, Conditions: v1.EndpointConditions{ @@ -86,6 +87,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice { Serving: boolptr(true), Terminating: boolptr(false), }, + Zone: strptr("us-east-1b"), }, { Addresses: []string{"3.4.5.6"}, Conditions: v1.EndpointConditions{ @@ -93,6 +95,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice { Serving: boolptr(true), Terminating: boolptr(true), }, + Zone: strptr("us-east-1c"), }, { Addresses: []string{"4.5.6.7"}, Conditions: v1.EndpointConditions{ @@ -104,67 +107,14 @@ func makeEndpointSliceV1() *v1.EndpointSlice { Kind: "Node", Name: "barbaz", }, - }, - }, - } -} - -func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { - return &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - Labels: map[string]string{ - v1beta1.LabelServiceName: "testendpoints", - }, - Annotations: map[string]string{ - "test.annotation": "test", - }, - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{ - { - Addresses: []string{"1.2.3.4"}, - Hostname: strptr("testendpoint1"), - }, { - Addresses: []string{"2.3.4.5"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - Serving: boolptr(true), - Terminating: boolptr(false), - }, - }, { - Addresses: []string{"3.4.5.6"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(false), - Serving: boolptr(true), - Terminating: boolptr(true), - }, - }, { - Addresses: []string{"4.5.6.7"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - Serving: boolptr(true), - Terminating: boolptr(false), - }, - TargetRef: &corev1.ObjectReference{ - Kind: "Node", - Name: "barbaz", - }, + Zone: strptr("us-east-1a"), }, }, } } func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.25.0") + n, c 
:= makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ discovery: n, @@ -184,8 +134,10 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -196,6 +148,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -206,6 +159,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -218,6 +172,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -239,71 +194,6 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { }.Run(t) } -func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "1.20.0") - - k8sDiscoveryTest{ - discovery: n, - beforeRun: func() { - obj := makeEndpointSliceV1beta1() - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: map[string]*targetgroup.Group{ - "endpointslice/default/testendpoints": { - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - 
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, - Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - }, - Source: "endpointslice/default/testendpoints", - }, - }, - }.Run(t) -} - func TestEndpointSliceDiscoveryAdd(t *testing.T) { obj := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -343,25 +233,25 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { PodIP: "1.2.3.4", }, } - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.20.0", obj) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ + obj := &v1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ + AddressType: v1.AddressTypeIPv4, + Ports: []v1.EndpointPort{ { Name: strptr("testport"), Port: int32ptr(9000), Protocol: protocolptr(corev1.ProtocolTCP), }, }, - Endpoints: []v1beta1.Endpoint{ + Endpoints: []v1.Endpoint{ { Addresses: []string{"4.3.2.1"}, TargetRef: &corev1.ObjectReference{ @@ -369,13 +259,13 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { Name: "testpod", Namespace: "default", }, - Conditions: v1beta1.EndpointConditions{ + Conditions: v1.EndpointConditions{ Ready: boolptr(false), }, }, }, } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) + c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ @@ -401,6 +291,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": 
"false", }, { "__address__": "1.2.3.4:9001", @@ -416,6 +307,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -430,113 +322,34 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { } func TestEndpointSliceDiscoveryDelete(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEndpointSliceV1() - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) + c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Source: "endpointslice/default/testendpoints", - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "", - "__meta_kubernetes_endpointslice_address_target_name": "", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", - "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - 
"__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - "__meta_kubernetes_namespace": "default", - }, }, }, }.Run(t) } func TestEndpointSliceDiscoveryUpdate(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{ - { - Addresses: []string{"1.2.3.4"}, - Hostname: strptr("testendpoint1"), - }, { - Addresses: []string{"2.3.4.5"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - }, - }, - }, - } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) + obj := makeEndpointSliceV1() + obj.ObjectMeta.Labels = nil + obj.ObjectMeta.Annotations = nil + obj.Endpoints = obj.Endpoints[0:2] + c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ @@ -551,8 +364,10 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -563,28 +378,7 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - 
"__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -592,13 +386,9 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_namespace": "default", }, }, }, @@ -606,80 +396,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { } func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{}, - } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) + obj := makeEndpointSliceV1() + obj.Endpoints = []v1.Endpoint{} + c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "", - "__meta_kubernetes_endpointslice_address_target_name": "", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", - "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - 
"__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", @@ -696,7 +424,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { } func TestEndpointSliceDiscoveryWithService(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, @@ -724,8 +452,10 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -736,6 +466,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", 
"__meta_kubernetes_endpointslice_port_name": "testport", @@ -746,6 +477,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -758,6 +490,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -783,7 +516,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { } func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, @@ -824,8 +557,10 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -836,6 +571,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", @@ -846,6 +582,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", @@ -858,6 +595,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", 
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -914,8 +652,10 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -929,6 +669,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -939,6 +680,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -951,6 +693,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1014,8 +757,10 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1029,6 +774,7 @@ func 
TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1039,6 +785,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1051,6 +798,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1160,8 +908,10 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1172,6 +922,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", @@ -1182,6 +933,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", @@ -1194,6 +946,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", 
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1235,6 +988,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_phase": "", "__meta_kubernetes_pod_ready": "unknown", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -1308,8 +1062,10 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", + "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1320,6 +1076,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", @@ -1330,6 +1087,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", @@ -1342,6 +1100,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", "__meta_kubernetes_endpointslice_port": "9000", "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", @@ -1405,3 +1164,203 @@ func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) { expectedRes: map[string]*targetgroup.Group{}, }.Run(t) } + +// TestEndpointSliceInfIndexersCount makes sure that RoleEndpointSlice discovery +// sets up indexing for the main Kube informer only when needed. 
+// See: https://github.com/prometheus/prometheus/pull/13554#discussion_r1490965817 +func TestEndpointSliceInfIndexersCount(t *testing.T) { + tests := []struct { + name string + withNodeMetadata bool + }{ + {"with node metadata", true}, + {"without node metadata", false}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + n *Discovery + mainInfIndexersCount int + ) + if tc.withNodeMetadata { + mainInfIndexersCount = 1 + n, _ = makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, AttachMetadataConfig{Node: true}) + } else { + n, _ = makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}) + } + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + n.RLock() + defer n.RUnlock() + require.Len(t, n.discoverers, 1) + require.Len(t, n.discoverers[0].(*EndpointSlice).endpointSliceInf.GetIndexer().GetIndexers(), mainInfIndexersCount) + }, + }.Run(t) + }) + } +} + +func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) { + objs := []runtime.Object{ + &v1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + AddressType: v1.AddressTypeIPv4, + Ports: []v1.EndpointPort{ + { + Name: strptr("testport"), + Port: int32ptr(9000), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + { + Name: strptr("initport"), + Port: int32ptr(9111), + Protocol: protocolptr(corev1.ProtocolTCP), + }, + }, + Endpoints: []v1.Endpoint{ + { + Addresses: []string{"4.3.2.1"}, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: corev1.PodSpec{ + NodeName: "testnode", + InitContainers: []corev1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []corev1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpointslice/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpointslice_address_target_kind": "Pod", + "__meta_kubernetes_endpointslice_address_target_name": "testpod", + "__meta_kubernetes_endpointslice_port": "9111", + "__meta_kubernetes_endpointslice_port_name": "initport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpointslice/default/testsidecar", + }, + }, + }.Run(t) +} diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 697b6f5198..4d91e7a460 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -21,21 +21,15 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" "k8s.io/client-go/tools/cache" 
"k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - ingressAddCount = eventCount.WithLabelValues("ingress", "add") - ingressUpdateCount = eventCount.WithLabelValues("ingress", "update") - ingressDeleteCount = eventCount.WithLabelValues("ingress", "delete") -) - // Ingress implements discovery of Kubernetes ingress. type Ingress struct { logger log.Logger @@ -45,8 +39,18 @@ type Ingress struct { } // NewIngress returns a new ingress discovery. -func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress { - s := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")} +func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { + ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) + ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) + ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) + + s := &Ingress{ + logger: l, + informer: inf, + store: inf.GetStore(), + queue: workqueue.NewNamed(RoleIngress.String()), + } + _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { ingressAddCount.Inc() @@ -88,7 +92,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for i.process(ctx, ch) { // nolint:revive + for i.process(ctx, ch) { } }() @@ -122,8 +126,6 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b switch ingress := o.(type) { case *v1.Ingress: ia = newIngressAdaptorFromV1(ingress) - case *v1beta1.Ingress: - ia = newIngressAdaptorFromV1beta1(ingress) default: level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o)) diff --git a/discovery/kubernetes/ingress_adaptor.go b/discovery/kubernetes/ingress_adaptor.go index 7be8538b53..84281196b4 100644 --- a/discovery/kubernetes/ingress_adaptor.go +++ b/discovery/kubernetes/ingress_adaptor.go @@ -15,11 +15,10 @@ package kubernetes import ( v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ingressAdaptor is an adaptor for the different Ingress versions +// ingressAdaptor is an adaptor for the different Ingress versions. type ingressAdaptor interface { getObjectMeta() metav1.ObjectMeta name() string @@ -36,7 +35,7 @@ type ingressRuleAdaptor interface { host() string } -// Adaptor for networking.k8s.io/v1 +// Adaptor for networking.k8s.io/v1. 
type ingressAdaptorV1 struct { ingress *v1.Ingress } @@ -89,56 +88,3 @@ func (i *ingressRuleAdaptorV1) paths() []string { } func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host } - -// Adaptor for networking.k8s.io/v1beta1 -type ingressAdaptorV1Beta1 struct { - ingress *v1beta1.Ingress -} - -func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor { - return &ingressAdaptorV1Beta1{ingress: ingress} -} -func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } -func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } - -func (i *ingressAdaptorV1Beta1) tlsHosts() []string { - var hosts []string - for _, tls := range i.ingress.Spec.TLS { - hosts = append(hosts, tls.Hosts...) - } - return hosts -} - -func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor { - var rules []ingressRuleAdaptor - for _, rule := range i.ingress.Spec.Rules { - rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule)) - } - return rules -} - -type ingressRuleAdaptorV1Beta1 struct { - rule v1beta1.IngressRule -} - -func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor { - return &ingressRuleAdaptorV1Beta1{rule: rule} -} - -func (i *ingressRuleAdaptorV1Beta1) paths() []string { - rv := i.rule.IngressRuleValue - if rv.HTTP == nil { - return nil - } - paths := make([]string, len(rv.HTTP.Paths)) - for n, p := range rv.HTTP.Paths { - paths[n] = p.Path - } - return paths -} - -func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host } diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go index 8e6654c2cc..9bddfb1e14 100644 --- a/discovery/kubernetes/ingress_test.go +++ b/discovery/kubernetes/ingress_test.go @@ -20,7 +20,6 @@ import ( "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -89,60 +88,6 @@ func makeIngress(tls TLSMode) *v1.Ingress { return ret } -func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress { - ret := &v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testingress", - Namespace: "default", - Labels: map[string]string{"test/label": "testvalue"}, - Annotations: map[string]string{"test/annotation": "testannotationvalue"}, - }, - Spec: v1beta1.IngressSpec{ - IngressClassName: classString("testclass"), - TLS: nil, - Rules: []v1beta1.IngressRule{ - { - Host: "example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - {Path: "/"}, - {Path: "/foo"}, - }, - }, - }, - }, - { - // No backend config, ignored - Host: "nobackend.example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{}, - }, - }, - { - Host: "test.example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{{}}, - }, - }, - }, - }, - }, - } - - switch tls { - case TLSYes: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}} - case 
TLSMixed: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}} - case TLSWildcard: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}} - } - - return ret -} - func classString(v string) *string { return &v } @@ -212,20 +157,6 @@ func TestIngressDiscoveryAdd(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSNo) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSNo), - }.Run(t) -} - func TestIngressDiscoveryAddTLS(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) @@ -240,20 +171,6 @@ func TestIngressDiscoveryAddTLS(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSYes) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSYes), - }.Run(t) -} - func TestIngressDiscoveryAddMixed(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) @@ -268,20 +185,6 @@ func TestIngressDiscoveryAddMixed(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSMixed) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSMixed), - }.Run(t) -} - func TestIngressDiscoveryNamespaces(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) @@ -303,27 +206,6 @@ func TestIngressDiscoveryNamespaces(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0") - - expected := expectedTargetGroups("ns1", TLSNo) - for k, v := range expectedTargetGroups("ns2", TLSNo) { - expected[k] = v - } - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - for _, ns := range []string{"ns1", "ns2"} { - obj := makeIngressV1beta1(TLSNo) - obj.Namespace = ns - c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) - } - }, - expectedMaxItems: 2, - expectedRes: expected, - }.Run(t) -} - func TestIngressDiscoveryOwnNamespace(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true}) diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index ca5ee49e28..93ac65d8dc 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -25,8 +25,6 @@ import ( "github.com/prometheus/prometheus/util/strutil" - disv1beta1 "k8s.io/api/discovery/v1beta1" - "github.com/go-kit/log" "github.com/go-kit/log/level" 
"github.com/prometheus/client_golang/prometheus" @@ -36,12 +34,10 @@ import ( apiv1 "k8s.io/api/core/v1" disv1 "k8s.io/api/discovery/v1" networkv1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -58,25 +54,15 @@ import ( const ( // metaLabelPrefix is the meta prefix used for all meta labels. // in this discovery. - metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_" - namespaceLabel = metaLabelPrefix + "namespace" - metricsNamespace = "prometheus_sd_kubernetes" - presentValue = model.LabelValue("true") + metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_" + namespaceLabel = metaLabelPrefix + "namespace" + presentValue = model.LabelValue("true") ) var ( - // Http header + // Http header. userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - // Custom events metric - eventCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: metricsNamespace, - Name: "events_total", - Help: "The number of Kubernetes events handled.", - }, - []string{"role", "event"}, - ) - // DefaultSDConfig is the default Kubernetes SD configuration + // DefaultSDConfig is the default Kubernetes SD configuration. DefaultSDConfig = SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, } @@ -84,15 +70,6 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(eventCount) - // Initialize metric vectors. - for _, role := range []string{"endpointslice", "endpoints", "node", "pod", "service", "ingress"} { - for _, evt := range []string{"add", "delete", "update"} { - eventCount.WithLabelValues(role, evt) - } - } - (&clientGoRequestMetricAdapter{}).Register(prometheus.DefaultRegisterer) - (&clientGoWorkqueueMetricsProvider{}).Register(prometheus.DefaultRegisterer) } // Role is role of the service in Kubernetes. @@ -121,6 +98,16 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { } } +func (c Role) String() string { + return string(c) +} + +const ( + MetricLabelRoleAdd = "add" + MetricLabelRoleDelete = "delete" + MetricLabelRoleUpdate = "update" +) + // SDConfig is the configuration for Kubernetes service discovery. type SDConfig struct { APIServer config.URL `yaml:"api_server,omitempty"` @@ -132,12 +119,17 @@ type SDConfig struct { AttachMetadata AttachMetadataConfig `yaml:"attach_metadata,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "kubernetes" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return New(opts.Logger, c) + return New(opts.Logger, opts.Metrics, c) } // SetDirectory joins any relative file paths with dir. @@ -274,6 +266,7 @@ type Discovery struct { selectors roleSelector ownNamespace string attachMetadata AttachMetadataConfig + metrics *kubernetesMetrics } func (d *Discovery) getNamespaces() []string { @@ -292,7 +285,12 @@ func (d *Discovery) getNamespaces() []string { } // New creates a new Kubernetes discovery for the given role. 
-func New(l log.Logger, conf *SDConfig) (*Discovery, error) { +func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { + m, ok := metrics.(*kubernetesMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if l == nil { l = log.NewNopLogger() } @@ -309,7 +307,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { } case conf.APIServer.URL == nil: // Use the Kubernetes provided pod service account - // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ + // as described in https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/#using-official-client-libraries kcfg, err = rest.InClusterConfig() if err != nil { return nil, err @@ -346,7 +344,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { return nil, err } - return &Discovery{ + d := &Discovery{ client: c, logger: l, role: conf.Role, @@ -355,7 +353,10 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { selectors: mapSelector(conf.Selectors), ownNamespace: ownNamespace, attachMetadata: conf.AttachMetadata, - }, nil + metrics: m, + } + + return d, nil } func mapSelector(rawSelector []SelectorConfig) roleSelector { @@ -391,59 +392,27 @@ const resyncDisabled = 0 // Run implements the discoverer interface. func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { d.Lock() + namespaces := d.getNamespaces() switch d.role { case RoleEndpointSlice: - // Check "networking.k8s.io/v1" availability with retries. - // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkDiscoveryV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedIndexInformer - if v1Supported { - e := d.client.DiscoveryV1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) - } else { - e := d.client.DiscoveryV1beta1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{}) + e := d.client.DiscoveryV1().EndpointSlices(namespace) + elw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + 
return e.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.Watch(ctx, options) + }, } + informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) s := d.client.CoreV1().Services(namespace) slw := &cache.ListWatch{ @@ -479,9 +448,10 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { eps := NewEndpointSlice( log.With(d.logger, "role", "endpointslice"), informer, - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), - cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), + d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, + d.metrics.eventCount, ) d.discoverers = append(d.discoverers, eps) go eps.endpointSliceInf.Run(ctx.Done()) @@ -538,9 +508,10 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { eps := NewEndpoints( log.With(d.logger, "role", "endpoint"), d.newEndpointsByNodeInformer(elw), - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), - cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), + d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, + d.metrics.eventCount, ) d.discoverers = append(d.discoverers, eps) go eps.endpointsInf.Run(ctx.Done()) @@ -572,6 +543,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { log.With(d.logger, "role", "pod"), d.newPodsByNodeInformer(plw), nodeInformer, + d.metrics.eventCount, ) d.discoverers = append(d.discoverers, pod) go pod.podInf.Run(ctx.Done()) @@ -593,71 +565,40 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } svc := NewService( log.With(d.logger, "role", "service"), - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + d.metrics.eventCount, ) d.discoverers = append(d.discoverers, svc) go svc.informer.Run(ctx.Done()) } case RoleIngress: - // Check "networking.k8s.io/v1" availability with retries. 
- // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkNetworkingV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedInformer - if v1Supported { - i := d.client.NetworkingV1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) - } else { - i := d.client.NetworkingV1beta1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled) + i := d.client.NetworkingV1().Ingresses(namespace) + ilw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.Watch(ctx, options) + }, } + informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) ingress := NewIngress( log.With(d.logger, "role", "ingress"), informer, + d.metrics.eventCount, ) d.discoverers = append(d.discoverers, ingress) go ingress.informer.Run(ctx.Done()) } case RoleNode: nodeInformer := d.newNodeInformer(ctx) - node := NewNode(log.With(d.logger, "role", "node"), nodeInformer) + node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount) d.discoverers = append(d.discoverers, node) go node.informer.Run(ctx.Done()) default: @@ -709,20 +650,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) ( } } -func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // networking.k8s.io/v1 is available since Kubernetes v1.19 - // https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md - return semVer.Major() >= 1 && semVer.Minor() >= 19, nil -} - func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { nlw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { @@ -736,7 +663,7 @@ func (d *Discovery) 
newNodeInformer(ctx context.Context) cache.SharedInformer { return d.client.CoreV1().Nodes().Watch(ctx, options) }, } - return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled) + return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled) } func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer { @@ -751,13 +678,28 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde } } - return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers) + return d.mustNewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers) } func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) + indexers[podIndex] = func(obj interface{}) ([]string, error) { + e, ok := obj.(*apiv1.Endpoints) + if !ok { + return nil, fmt.Errorf("object is not endpoints") + } + var pods []string + for _, target := range e.Subsets { + for _, addr := range target.Addresses { + if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" { + pods = append(pods, namespacedName(addr.TargetRef.Namespace, addr.TargetRef.Name)) + } + } + } + return pods, nil + } if !d.attachMetadata.Node { - return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) + return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) } indexers[nodeIndex] = func(obj interface{}) ([]string, error) { @@ -783,13 +725,13 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share return nodes, nil } - return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) + return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) } func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) if !d.attachMetadata.Node { - cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncDisabled, indexers) + return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers) } indexers[nodeIndex] = func(obj interface{}) ([]string, error) { @@ -808,19 +750,6 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object } } } - case *disv1beta1.EndpointSlice: - for _, target := range e.Endpoints { - if target.TargetRef != nil { - switch target.TargetRef.Kind { - case "Pod": - if target.NodeName != nil { - nodes = append(nodes, *target.NodeName) - } - case "Node": - nodes = append(nodes, target.TargetRef.Name) - } - } - } default: return nil, fmt.Errorf("object is not an endpointslice") } @@ -828,22 +757,32 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object return nodes, nil } - return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers) + return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers) } -func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err +func (d *Discovery) informerWatchErrorHandler(r *cache.Reflector, err error) { + d.metrics.failuresCount.Inc() + cache.DefaultWatchErrorHandler(r, err) +} + +func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer { + informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod) + 
// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand. + // Such a scenario would suggest an incorrect use of the API, thus the panic. + if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil { + panic(err) } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err + return informer +} + +func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers) + // Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand. + // Such a scenario would suggest an incorrect use of the API, thus the panic. + if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil { + panic(err) } - // The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25. - // discovery.k8s.io/v1 is available since Kubernetes v1.21 - // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 - return semVer.Major() >= 1 && semVer.Minor() >= 21, nil + return informer } func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { @@ -861,3 +800,7 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotationpresent_"+ln)] = presentValue } } + +func namespacedName(namespace, name string) string { + return namespace + "/" + name +} diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index d0ed4c6ca1..a026366502 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -21,14 +21,20 @@ import ( "time" "github.com/go-kit/log" + prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/require" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/version" + "k8s.io/apimachinery/pkg/watch" fakediscovery "k8s.io/client-go/discovery/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" + kubetesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/testutil" @@ -40,7 +46,7 @@ func TestMain(m *testing.M) { // makeDiscovery creates a kubernetes.Discovery instance for testing. func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) { - return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...) + return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...) } // makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing. 
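The mustNewSharedInformer/mustNewSharedIndexInformer helpers above panic when SetWatchErrorHandler returns an error, which client-go only does once an informer has already been started. A self-contained sketch of the same hook, assuming a do-nothing ListerWatcher and a hypothetical failures counter:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Hypothetical failure counter, standing in for metrics.failuresCount.
	failures := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_sd_failures_total",
		Help: "Failed WATCH/LIST requests.",
	})

	// A do-nothing ListerWatcher keeps the sketch self-contained.
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return &corev1.PodList{}, nil
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return watch.NewFake(), nil
		},
	}

	inf := cache.NewSharedInformer(lw, &corev1.Pod{}, 0)
	// The hook must be installed before the informer starts; afterwards
	// SetWatchErrorHandler errors out, hence the panic in the patch.
	if err := inf.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
		failures.Inc()
		cache.DefaultWatchErrorHandler(r, err)
	}); err != nil {
		panic(err)
	}
	fmt.Println("error handler installed")
}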
@@ -49,13 +55,30 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery) fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer} - return &Discovery{ + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := newDiscovererMetrics(reg, refreshMetrics) + err := metrics.Register() + if err != nil { + panic(err) + } + // TODO(ptodev): Unregister the metrics at the end of the test. + + kubeMetrics, ok := metrics.(*kubernetesMetrics) + if !ok { + panic("invalid discovery metrics type") + } + + d := &Discovery{ client: clientset, logger: log.NewNopLogger(), role: role, namespaceDiscovery: &nsDiscovery, ownNamespace: "own-ns", - }, clientset + metrics: kubeMetrics, + } + + return d, clientset } // makeDiscoveryWithMetadata creates a kubernetes.Discovery instance with the specified metadata config. @@ -109,17 +132,11 @@ func (d k8sDiscoveryTest) Run(t *testing.T) { } resChan := make(chan map[string]*targetgroup.Group) - go readResultWithTimeout(t, ch, d.expectedMaxItems, time.Second, resChan) + go readResultWithTimeout(t, ctx, ch, d.expectedMaxItems, time.Second, resChan) dd, ok := d.discovery.(hasSynced) - if !ok { - t.Errorf("discoverer does not implement hasSynced interface") - return - } - if !cache.WaitForCacheSync(ctx.Done(), dd.hasSynced) { - t.Errorf("discoverer failed to sync: %v", dd) - return - } + require.True(t, ok, "discoverer does not implement hasSynced interface") + require.True(t, cache.WaitForCacheSync(ctx.Done(), dd.hasSynced), "discoverer failed to sync: %v", dd) if d.afterStart != nil { d.afterStart() @@ -128,13 +145,18 @@ func (d k8sDiscoveryTest) Run(t *testing.T) { if d.expectedRes != nil { res := <-resChan requireTargetGroups(t, d.expectedRes, res) + } else { + // Stop readResultWithTimeout and wait for it. + cancel() + <-resChan } } // readResultWithTimeout reads all targetgroups from channel with timeout. // It merges targetgroups by source and sends the result to result channel. -func readResultWithTimeout(t *testing.T, ch <-chan []*targetgroup.Group, max int, timeout time.Duration, resChan chan<- map[string]*targetgroup.Group) { +func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { res := make(map[string]*targetgroup.Group) + timeout := time.After(stopAfter) Loop: for { select { @@ -145,15 +167,18 @@ Loop: } res[tg.Source] = tg } - if len(res) == max { + if len(res) == maxGroups { // Reached max target groups we may get, break fast. break Loop } - case <-time.After(timeout): + case <-timeout: // Because we use queue, an object that is created then // deleted or updated may be processed only once. // So possibly we may skip events, timed out here. 
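The reworked readResultWithTimeout above arms its timeout once via time.After instead of resetting it on every receive, and adds a context case so callers can stop the reader early. The same three-way drain loop in isolation, with illustrative names:

package main

import (
	"context"
	"fmt"
	"time"
)

// drain collects up to maxGroups values from ch, stopping early when the
// deadline fires or ctx is canceled; it mirrors the select in the helper.
func drain(ctx context.Context, ch <-chan int, maxGroups int, stopAfter time.Duration) []int {
	var res []int
	timeout := time.After(stopAfter) // armed once, not per message
	for {
		select {
		case v := <-ch:
			res = append(res, v)
			if len(res) == maxGroups {
				return res
			}
		case <-timeout:
			return res
		case <-ctx.Done():
			return res
		}
	}
}

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	fmt.Println(drain(ctx, ch, 2, 100*time.Millisecond))
}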
- t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max) + t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups) + break Loop + case <-ctx.Done(): + t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups) break Loop } } @@ -260,36 +285,38 @@ func TestRetryOnError(t *testing.T) { } } -func TestCheckNetworkingV1Supported(t *testing.T) { +func TestFailuresCountMetric(t *testing.T) { tests := []struct { - version string - wantSupported bool - wantErr bool + role Role + minFailedWatches int }{ - {version: "v1.18.0", wantSupported: false, wantErr: false}, - {version: "v1.18.1", wantSupported: false, wantErr: false}, - // networking v1 is supported since Kubernetes v1.19 - {version: "v1.19.0", wantSupported: true, wantErr: false}, - {version: "v1.20.0-beta.2", wantSupported: true, wantErr: false}, - // error patterns - {version: "", wantSupported: false, wantErr: true}, - {version: "<>", wantSupported: false, wantErr: true}, + {RoleNode, 1}, + {RolePod, 1}, + {RoleService, 1}, + {RoleEndpoint, 3}, + {RoleEndpointSlice, 3}, + {RoleIngress, 1}, } for _, tc := range tests { tc := tc - t.Run(tc.version, func(t *testing.T) { - clientset := fake.NewSimpleClientset() - fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery) - fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version} - supported, err := checkNetworkingV1Supported(clientset) - - if tc.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - require.Equal(t, tc.wantSupported, supported) + t.Run(string(tc.role), func(t *testing.T) { + t.Parallel() + + n, c := makeDiscovery(tc.role, NamespaceDiscovery{}) + // The counter is initialized and no failures at the beginning. + require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount)) + + // Simulate an error on watch requests. + c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) { + return true, nil, apierrors.NewUnauthorized("unauthorized") + }) + + // Start the discovery. + k8sDiscoveryTest{discovery: n}.Run(t) + + // At least the errors of the initial watches should be caught (watches are retried on errors). + require.GreaterOrEqual(t, prom_testutil.ToFloat64(n.metrics.failuresCount), float64(tc.minFailedWatches)) }) } } diff --git a/discovery/kubernetes/metrics.go b/discovery/kubernetes/metrics.go new file mode 100644 index 0000000000..fe419bc782 --- /dev/null +++ b/discovery/kubernetes/metrics.go @@ -0,0 +1,86 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*kubernetesMetrics)(nil) + +type kubernetesMetrics struct { + eventCount *prometheus.CounterVec + failuresCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &kubernetesMetrics{ + eventCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: discovery.KubernetesMetricsNamespace, + Name: "events_total", + Help: "The number of Kubernetes events handled.", + }, + []string{"role", "event"}, + ), + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: discovery.KubernetesMetricsNamespace, + Name: "failures_total", + Help: "The number of failed WATCH/LIST requests.", + }, + ), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.eventCount, + m.failuresCount, + }) + + // Initialize metric vectors. + for _, role := range []string{ + RoleEndpointSlice.String(), + RoleEndpoint.String(), + RoleNode.String(), + RolePod.String(), + RoleService.String(), + RoleIngress.String(), + } { + for _, evt := range []string{ + MetricLabelRoleAdd, + MetricLabelRoleDelete, + MetricLabelRoleUpdate, + } { + m.eventCount.WithLabelValues(role, evt) + } + } + + m.failuresCount.Add(0) + + return m +} + +// Register implements discovery.DiscovererMetrics. +func (m *kubernetesMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *kubernetesMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 6a20e7b1f2..74d87e22c4 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" @@ -35,12 +36,6 @@ const ( NodeLegacyHostIP = "LegacyHostIP" ) -var ( - nodeAddCount = eventCount.WithLabelValues("node", "add") - nodeUpdateCount = eventCount.WithLabelValues("node", "update") - nodeDeleteCount = eventCount.WithLabelValues("node", "delete") -) - // Node discovers Kubernetes nodes. type Node struct { logger log.Logger @@ -50,11 +45,22 @@ type Node struct { } // NewNode returns a new node discovery. 
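The new metrics.go above touches every role/event label combination at construction time, and failuresCount.Add(0) does the same for the plain counter, so all series are exported as zero before any event fires. A small sketch of why that pre-initialization matters, with hypothetical metric names:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	events := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_events_total",
		Help: "Handled events.",
	}, []string{"role", "event"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(events)

	// Without this loop, the (node, add) series would not exist until the
	// first Inc(), and dashboards would see a gap instead of a zero.
	for _, role := range []string{"node", "pod"} {
		for _, evt := range []string{"add", "update", "delete"} {
			events.WithLabelValues(role, evt)
		}
	}

	fmt.Println(testutil.ToFloat64(events.WithLabelValues("node", "add"))) // 0
}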
-func NewNode(l log.Logger, inf cache.SharedInformer) *Node { +func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { if l == nil { l = log.NewNopLogger() } - n := &Node{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("node")} + + nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) + nodeUpdateCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleUpdate) + nodeDeleteCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleDelete) + + n := &Node{ + logger: l, + informer: inf, + store: inf.GetStore(), + queue: workqueue.NewNamed(RoleNode.String()), + } + _, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { nodeAddCount.Inc() @@ -96,7 +102,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for n.process(ctx, ch) { // nolint:revive + for n.process(ctx, ch) { } }() @@ -202,7 +208,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { // 5. NodeLegacyHostIP // 6. NodeHostName // -// Derived from k8s.io/kubernetes/pkg/util/node/node.go +// Derived from k8s.io/kubernetes/pkg/util/node/node.go. func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) { m := map[apiv1.NodeAddressType][]string{} for _, a := range node.Status.Addresses { diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 74f74c1f75..02990e415f 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -23,6 +23,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,12 +33,9 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -const nodeIndex = "node" - -var ( - podAddCount = eventCount.WithLabelValues("pod", "add") - podUpdateCount = eventCount.WithLabelValues("pod", "update") - podDeleteCount = eventCount.WithLabelValues("pod", "delete") +const ( + nodeIndex = "node" + podIndex = "pod" ) // Pod discovers new pod targets. @@ -51,18 +49,22 @@ type Pod struct { } // NewPod creates a new pod discovery. 
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer) *Pod { +func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { if l == nil { l = log.NewNopLogger() } + podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) + podDeleteCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleDelete) + podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate) + p := &Pod{ podInf: pods, nodeInf: nodes, withNodeMetadata: nodes != nil, store: pods.GetStore(), logger: l, - queue: workqueue.NewNamed("pod"), + queue: workqueue.NewNamed(RolePod.String()), } _, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { @@ -131,7 +133,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for p.process(ctx, ch) { // nolint:revive + for p.process(ctx, ch) { } }() @@ -327,7 +329,7 @@ func podSource(pod *apiv1.Pod) string { } func podSourceFromNamespaceAndName(namespace, name string) string { - return "pod/" + namespace + "/" + name + return "pod/" + namespacedName(namespace, name) } func podReady(pod *apiv1.Pod) model.LabelValue { diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 7addf0054e..51204a5a1a 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" @@ -30,12 +31,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - svcAddCount = eventCount.WithLabelValues("service", "add") - svcUpdateCount = eventCount.WithLabelValues("service", "update") - svcDeleteCount = eventCount.WithLabelValues("service", "delete") -) - // Service implements discovery of Kubernetes services. type Service struct { logger log.Logger @@ -45,11 +40,22 @@ type Service struct { } // NewService returns a new service discovery. 
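NewService below follows the same shape as NewNode and NewPod above: the per-event counters are resolved once from the injected CounterVec, and each informer callback increments its counter and enqueues the object key. A compilable sketch of that handler wiring, invoked directly rather than registered on an informer, with illustrative names:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	eventCount := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_events_total", Help: "Handled events.",
	}, []string{"role", "event"})
	addCount := eventCount.WithLabelValues("service", "add")

	queue := workqueue.NewNamed("service")
	defer queue.ShutDown()

	handler := cache.ResourceEventHandlerFuncs{
		AddFunc: func(o interface{}) {
			addCount.Inc()
			// Derive a namespace/name key and hand it to the workqueue,
			// as the discovery roles do.
			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
			if err == nil {
				queue.Add(key)
			}
		},
	}

	// In the real code the handler is registered via AddEventHandler on a
	// SharedInformer; here it is called directly for illustration.
	svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}
	handler.AddFunc(svc)
	key, _ := queue.Get()
	fmt.Println(key) // default/web
}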
-func NewService(l log.Logger, inf cache.SharedInformer) *Service { +func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { if l == nil { l = log.NewNopLogger() } - s := &Service{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("service")} + + svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) + svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) + svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) + + s := &Service{ + logger: l, + informer: inf, + store: inf.GetStore(), + queue: workqueue.NewNamed(RoleService.String()), + } + _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { svcAddCount.Inc() @@ -91,7 +97,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for s.process(ctx, ch) { // nolint:revive + for s.process(ctx, ch) { } }() diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go deleted file mode 100644 index 87823f4010..0000000000 --- a/discovery/legacymanager/manager.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "reflect" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -var ( - failedConfigs = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_failed_configs", - Help: "Current number of service discovery configurations that failed to load.", - }, - []string{"name"}, - ) - discoveredTargets = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_discovered_targets", - Help: "Current number of discovered targets.", - }, - []string{"name", "config"}, - ) - receivedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_received_updates_total", - Help: "Total number of update events received from the SD providers.", - }, - []string{"name"}, - ) - delayedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_delayed_total", - Help: "Total number of update events that couldn't be sent immediately.", - }, - []string{"name"}, - ) - sentUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_total", - Help: "Total number of update events sent to the SD consumers.", - }, - []string{"name"}, - ) -) - -func RegisterMetrics() { - prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) -} - -type poolKey struct { - setName string - provider string -} - -// provider holds a Discoverer instance, its configuration and its subscribers. 
-type provider struct { - name string - d discovery.Discoverer - subs []string - config interface{} -} - -// NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager { - if logger == nil { - logger = log.NewNopLogger() - } - mgr := &Manager{ - logger: logger, - syncCh: make(chan map[string][]*targetgroup.Group), - targets: make(map[poolKey]map[string]*targetgroup.Group), - discoverCancel: []context.CancelFunc{}, - ctx: ctx, - updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), - } - for _, option := range options { - option(mgr) - } - return mgr -} - -// Name sets the name of the manager. -func Name(n string) func(*Manager) { - return func(m *Manager) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.name = n - } -} - -// Manager maintains a set of discovery providers and sends each update to a map channel. -// Targets are grouped by the target set name. -type Manager struct { - logger log.Logger - name string - mtx sync.RWMutex - ctx context.Context - discoverCancel []context.CancelFunc - - // Some Discoverers(eg. k8s) send only the updates for a given target group - // so we use map[tg.Source]*targetgroup.Group to know which group to update. - targets map[poolKey]map[string]*targetgroup.Group - // providers keeps track of SD providers. - providers []*provider - // The sync channel sends the updates as a map where the key is the job value from the scrape config. - syncCh chan map[string][]*targetgroup.Group - - // How long to wait before sending updates to the channel. The variable - // should only be modified in unit tests. - updatert time.Duration - - // The triggerSend channel signals to the manager that new updates have been received from providers. - triggerSend chan struct{} -} - -// Run starts the background processing -func (m *Manager) Run() error { - go m.sender() - for range m.ctx.Done() { - m.cancelDiscoverers() - return m.ctx.Err() - } - return nil -} - -// SyncCh returns a read only channel used by all the clients to receive target updates. -func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { - return m.syncCh -} - -// ApplyConfig removes all running discovery providers and starts new ones using the provided config. -func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - for pk := range m.targets { - if _, ok := cfg[pk.setName]; !ok { - discoveredTargets.DeleteLabelValues(m.name, pk.setName) - } - } - m.cancelDiscoverers() - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil - - failedCount := 0 - for name, scfg := range cfg { - failedCount += m.registerProviders(scfg, name) - discoveredTargets.WithLabelValues(m.name, name).Set(0) - } - failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) - - for _, prov := range m.providers { - m.startProvider(m.ctx, prov) - } - - return nil -} - -// StartCustomProvider is used for sdtool. Only use this if you know what you're doing. 
-func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) { - p := &provider{ - name: name, - d: worker, - subs: []string{name}, - } - m.providers = append(m.providers, p) - m.startProvider(ctx, p) -} - -func (m *Manager) startProvider(ctx context.Context, p *provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) - ctx, cancel := context.WithCancel(ctx) - updates := make(chan []*targetgroup.Group) - - m.discoverCancel = append(m.discoverCancel, cancel) - - go p.d.Run(ctx, updates) - go m.updater(ctx, p, updates) -} - -func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { - for { - select { - case <-ctx.Done(): - return - case tgs, ok := <-updates: - receivedUpdates.WithLabelValues(m.name).Inc() - if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) - return - } - - for _, s := range p.subs { - m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) - } - - select { - case m.triggerSend <- struct{}{}: - default: - } - } - } -} - -func (m *Manager) sender() { - ticker := time.NewTicker(m.updatert) - defer ticker.Stop() - - for { - select { - case <-m.ctx.Done(): - return - case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. - select { - case <-m.triggerSend: - sentUpdates.WithLabelValues(m.name).Inc() - select { - case m.syncCh <- m.allGroups(): - default: - delayedUpdates.WithLabelValues(m.name).Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") - select { - case m.triggerSend <- struct{}{}: - default: - } - } - default: - } - } - } -} - -func (m *Manager) cancelDiscoverers() { - for _, c := range m.discoverCancel { - c() - } -} - -func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if _, ok := m.targets[poolKey]; !ok { - m.targets[poolKey] = make(map[string]*targetgroup.Group) - } - for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg - } - } -} - -func (m *Manager) allGroups() map[string][]*targetgroup.Group { - m.mtx.RLock() - defer m.mtx.RUnlock() - - tSets := map[string][]*targetgroup.Group{} - n := map[string]int{} - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) - } - } - for setName, v := range n { - discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v)) - } - return tSets -} - -// registerProviders returns a number of failed SD config. 
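// The updater/sender pair above implements throttled coalescing: updater does a
// non-blocking send on the 1-buffered triggerSend channel, and sender forwards
// the merged allGroups() snapshot at most once per updatert tick. A
// stripped-down, self-contained sketch of the same idiom:

package main

import (
	"fmt"
	"time"
)

func main() {
	trigger := make(chan struct{}, 1)

	// Producer side (updater): signal "something changed" without blocking.
	notify := func() {
		select {
		case trigger <- struct{}{}:
		default: // a signal is already pending; the burst coalesces into it
		}
	}

	for i := 0; i < 100; i++ {
		notify() // 100 provider updates...
	}

	// Consumer side (sender): drain at most once per tick.
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	<-ticker.C
	select {
	case <-trigger:
		fmt.Println("one aggregated downstream send") // ...become one send
	default:
	}
}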
-func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int { - var ( - failed int - added bool - ) - add := func(cfg discovery.Config) { - for _, p := range m.providers { - if reflect.DeepEqual(cfg, p.config) { - p.subs = append(p.subs, setName) - added = true - return - } - } - typ := cfg.Name() - d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), - }) - if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) - failed++ - return - } - m.providers = append(m.providers, &provider{ - name: fmt.Sprintf("%s/%d", typ, len(m.providers)), - d: d, - config: cfg, - subs: []string{setName}, - }) - added = true - } - for _, cfg := range cfgs { - add(cfg) - } - if !added { - // Add an empty target group to force the refresh of the corresponding - // scrape pool and to notify the receiver that this target set has no - // current targets. - // It can happen because the combined set of SD configurations is empty - // or because we fail to instantiate all the SD configurations. - add(discovery.StaticConfig{{}}) - } - return failed -} - -// StaticProvider holds a list of target groups that never change. -type StaticProvider struct { - TargetGroups []*targetgroup.Group -} - -// Run implements the Worker interface. -func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { - // We still have to consider that the consumer exits right away in which case - // the context will be canceled. - select { - case ch <- sd.TargetGroups: - case <-ctx.Done(): - } - close(ch) -} diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go deleted file mode 100644 index 13b84e6e36..0000000000 --- a/discovery/legacymanager/manager_test.go +++ /dev/null @@ -1,1136 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "sort" - "strconv" - "testing" - "time" - - "github.com/go-kit/log" - client_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/testutil" -) - -func TestMain(m *testing.M) { - testutil.TolerantVerifyLeak(m) -} - -// TestTargetUpdatesOrder checks that the target updates are received in the expected order. -func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter - // Final targets array is ordered alphabetically by the name of the discoverer. - // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. 
- testCases := []struct { - title string - updates map[string][]update - expectedTargets [][]*targetgroup.Group - }{ - { - title: "Single TP no updates", - updates: map[string][]update{ - "tp1": {}, - }, - expectedTargets: nil, - }, - { - title: "Multiple TPs no updates", - updates: map[string][]update{ - "tp1": {}, - "tp2": {}, - "tp3": {}, - }, - expectedTargets: nil, - }, - { - title: "Single TP empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - }, - }, - { - title: "Multiple TPs empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{}, - interval: 200 * time.Millisecond, - }, - }, - "tp3": { - { - targetGroups: []targetgroup.Group{}, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - {}, - {}, - }, - }, - { - title: "Single TP initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - }, - }, - { - title: "Single TP initials followed by empty updates", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - }, - }, - { - title: 
"Single TP initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 500 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: 
[]model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - { - title: "One TP initials arrive after other TP updates.", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 150 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 200 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - - { - title: "Single TP empty update in between", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 30 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: 
"tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 300 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - }, - }, - } - - for i, tc := range testCases { - tc := tc - t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - discoveryManager := NewManager(ctx, log.NewNopLogger()) - discoveryManager.updatert = 100 * time.Millisecond - - var totalUpdatesCount int - for _, up := range tc.updates { - if len(up) > 0 { - totalUpdatesCount += len(up) - } - } - provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount) - - for _, up := range tc.updates { - go newMockDiscoveryProvider(up...).Run(ctx, provUpdates) - } - - for x := 0; x < totalUpdatesCount; x++ { - select { - case <-ctx.Done(): - t.Fatalf("%d: no update arrived within the timeout limit", x) - case tgs := <-provUpdates: - discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) - for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x]) - } - } - } - }) - } -} - -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { - t.Helper() - - // Need to sort by the groups's source as the received order is not guaranteed. 
- sort.Sort(byGroupSource(got)) - sort.Sort(byGroupSource(expected)) - - require.Equal(t, expected, got) -} - -func staticConfig(addrs ...string) discovery.StaticConfig { - var cfg discovery.StaticConfig - for i, addr := range addrs { - cfg = append(cfg, &targetgroup.Group{ - Source: fmt.Sprint(i), - Targets: []model.LabelSet{ - {model.AddressLabel: model.LabelValue(addr)}, - }, - }) - } - return cfg -} - -func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { - t.Helper() - if _, ok := tSets[poolKey]; !ok { - t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets) - return - } - - match := false - var mergedTargets string - for _, targetGroup := range tSets[poolKey] { - for _, l := range targetGroup.Targets { - mergedTargets = mergedTargets + " " + l.String() - if l.String() == label { - match = true - } - } - } - if match != present { - msg := "" - if !present { - msg = "not" - } - t.Fatalf("%q should %s be present in Targets labels: %q", label, msg, mergedTargets) - } -} - -func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false) -} - -func TestDiscovererConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - staticConfig("baz:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true) -} - -// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after -// removing all targets from the static_configs sends an update with empty targetGroups. -// This is required to signal the receiver that this target set has no current targets. 
-func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - discovery.StaticConfig{{}}, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - - pkey := poolKey{setName: "prometheus", provider: "static/0"} - targetGroups, ok := discoveryManager.targets[pkey] - if !ok { - t.Fatalf("'%v' should be present in target groups", pkey) - } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) - } -} - -func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - discoveryManager := NewManager(ctx, nil) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - "prometheus2": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - if len(discoveryManager.providers) != 1 { - t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) - } -} - -func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { - originalConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - processedConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - cfgs := map[string]discovery.Configs{ - "prometheus": processedConfig, - } - discoveryManager.ApplyConfig(cfgs) - <-discoveryManager.SyncCh() - - for _, cfg := range cfgs { - require.Equal(t, originalConfig, cfg) - } -} - -type errorConfig struct{ err error } - -func (e errorConfig) Name() string { return "error" } -func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) { - return nil, e.err -} - -func TestGaugeFailedConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - errorConfig{fmt.Errorf("tests error 0")}, - errorConfig{fmt.Errorf("tests error 1")}, - errorConfig{fmt.Errorf("tests error 2")}, - }, - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount := client_testutil.ToFloat64(failedConfigs) - if failedCount != 3 { - 
t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount) - } - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount = client_testutil.ToFloat64(failedConfigs) - if failedCount != 0 { - t.Fatalf("Expected to get no failed config, got: %v", failedCount) - } -} - -func TestCoordinationWithReceiver(t *testing.T) { - updateDelay := 100 * time.Millisecond - - type expect struct { - delay time.Duration - tgs map[string][]*targetgroup.Group - } - - testCases := []struct { - title string - providers map[string]discovery.Discoverer - expected []expect - }{ - { - title: "Receiver should get all updates even when one provider closes its channel", - providers: map[string]discovery.Discoverer{ - "once1": &onceProvider{ - tgs: []*targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - "mock1": newMockDiscoveryProvider( - update{ - interval: 2 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - "mock1": { - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - { - title: "Receiver should get all updates even when the channel is blocked", - providers: map[string]discovery.Discoverer{ - "mock1": newMockDiscoveryProvider( - update{ - targetGroups: []targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - update{ - interval: 4 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - delay: 2 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - delay: 4 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - mgr := NewManager(ctx, nil) - mgr.updatert = updateDelay - go mgr.Run() - - for name, p := range tc.providers { - mgr.StartCustomProvider(ctx, name, p) - } - - for i, expected := range tc.expected { - time.Sleep(expected.delay) - select { - case <-ctx.Done(): - t.Fatalf("step %d: no update received in the expected timeframe", i) - case tgs, ok := <-mgr.SyncCh(): - if !ok { - t.Fatalf("step %d: discovery manager channel is closed", i) - } - if len(tgs) != len(expected.tgs) { - t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v", - i, len(tgs), len(expected.tgs), tgs, expected.tgs) - } - for k := range expected.tgs { - if _, ok := tgs[k]; !ok { - t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) - } - assertEqualGroups(t, tgs[k], expected.tgs[k]) - } - } - } - }) - } -} - -type update 
struct { - targetGroups []targetgroup.Group - interval time.Duration -} - -type mockdiscoveryProvider struct { - updates []update -} - -func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider { - tp := mockdiscoveryProvider{ - updates: updates, - } - return tp -} - -func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) { - for _, u := range tp.updates { - if u.interval > 0 { - select { - case <-ctx.Done(): - return - case <-time.After(u.interval): - } - } - tgs := make([]*targetgroup.Group, len(u.targetGroups)) - for i := range u.targetGroups { - tgs[i] = &u.targetGroups[i] - } - upCh <- tgs - } - <-ctx.Done() -} - -// byGroupSource implements sort.Interface so we can sort by the Source field. -type byGroupSource []*targetgroup.Group - -func (a byGroupSource) Len() int { return len(a) } -func (a byGroupSource) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source } - -// onceProvider sends updates once (if any) and closes the update channel. -type onceProvider struct { - tgs []*targetgroup.Group -} - -func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { - if len(o.tgs) > 0 { - ch <- o.tgs - } - close(ch) -} diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go deleted file mode 100644 index 955705394d..0000000000 --- a/discovery/legacymanager/registry.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "gopkg.in/yaml.v2" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -const ( - configFieldPrefix = "AUTO_DISCOVERY_" - staticConfigsKey = "static_configs" - staticConfigsFieldName = configFieldPrefix + staticConfigsKey -) - -var ( - configNames = make(map[string]discovery.Config) - configFieldNames = make(map[reflect.Type]string) - configFields []reflect.StructField - - configTypesMu sync.Mutex - configTypes = make(map[reflect.Type]reflect.Type) - - emptyStructType = reflect.TypeOf(struct{}{}) - configsType = reflect.TypeOf(discovery.Configs{}) -) - -// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. -func RegisterConfig(config discovery.Config) { - registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config) -} - -func init() { - // N.B.: static_configs is the only Config type implemented by default. - // All other types are registered at init by their implementing packages. 
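// RegisterConfig above maps each Config type to a `<name>_sd_configs` YAML key
// derived from Config.Name(), so an SD package wires itself up by
// self-registering at init time, just as the linode package does later in this
// diff. A sketch with a made-up "example" mechanism (NewDiscoverer body elided;
// interface shape as used by the registry being removed here):

package example

import "github.com/prometheus/prometheus/discovery"

type SDConfig struct{}

// Name determines the YAML key: example_sd_configs.
func (*SDConfig) Name() string { return "example" }

func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return nil, nil // a real implementation constructs and returns its Discoverer
}

func init() {
	discovery.RegisterConfig(&SDConfig{})
}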
- elemTyp := reflect.TypeOf(&targetgroup.Group{}) - registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{}) -} - -func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) { - name := config.Name() - if _, ok := configNames[name]; ok { - panic(fmt.Sprintf("discovery: Config named %q is already registered", name)) - } - configNames[name] = config - - fieldName := configFieldPrefix + yamlKey // Field must be exported. - configFieldNames[elemType] = fieldName - - // Insert fields in sorted order. - i := sort.Search(len(configFields), func(k int) bool { - return fieldName < configFields[k].Name - }) - configFields = append(configFields, reflect.StructField{}) // Add empty field at end. - copy(configFields[i+1:], configFields[i:]) // Shift fields to the right. - configFields[i] = reflect.StructField{ // Write new field in place. - Name: fieldName, - Type: reflect.SliceOf(elemType), - Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`), - } -} - -func getConfigType(out reflect.Type) reflect.Type { - configTypesMu.Lock() - defer configTypesMu.Unlock() - if typ, ok := configTypes[out]; ok { - return typ - } - // Initial exported fields map one-to-one. - var fields []reflect.StructField - for i, n := 0, out.NumField(); i < n; i++ { - switch field := out.Field(i); { - case field.PkgPath == "" && field.Type != configsType: - fields = append(fields, field) - default: - fields = append(fields, reflect.StructField{ - Name: "_" + field.Name, // Field must be unexported. - PkgPath: out.PkgPath(), - Type: emptyStructType, - }) - } - } - // Append extra config fields on the end. - fields = append(fields, configFields...) - typ := reflect.StructOf(fields) - configTypes[out] = typ - return typ -} - -// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs -// that have a Configs field that should be inlined. -func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error { - outVal := reflect.ValueOf(out) - if outVal.Kind() != reflect.Ptr { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outVal = outVal.Elem() - if outVal.Kind() != reflect.Struct { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outTyp := outVal.Type() - - cfgTyp := getConfigType(outTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields (defaults) to dynamic value. - var configs *discovery.Configs - for i, n := 0, outVal.NumField(); i < n; i++ { - if outTyp.Field(i).Type == configsType { - configs = outVal.Field(i).Addr().Interface().(*discovery.Configs) - continue - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - cfgVal.Field(i).Set(outVal.Field(i)) - } - if configs == nil { - return fmt.Errorf("discovery: Configs field not found in type: %T", out) - } - - // Unmarshal into dynamic value. - if err := unmarshal(cfgPtr.Interface()); err != nil { - return replaceYAMLTypeError(err, cfgTyp, outTyp) - } - - // Copy shared fields from dynamic value. - for i, n := 0, outVal.NumField(); i < n; i++ { - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. 
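// getConfigType above synthesizes a struct type at runtime: exported fields of
// the output struct are mirrored and one slice field per registered SD
// mechanism is appended, then YAML unmarshals into that dynamic value. The
// core primitive is reflect.StructOf; a minimal, runnable demonstration (field
// names are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	typ := reflect.StructOf([]reflect.StructField{
		{
			Name: "JobName",
			Type: reflect.TypeOf(""),
			Tag:  `yaml:"job_name"`,
		},
		{
			// Dynamically appended field, as registerConfig does for each
			// `<name>_sd_configs` key. It must be exported to be (un)marshaled.
			Name: "AUTO_DISCOVERY_example_sd_configs",
			Type: reflect.TypeOf([]string{}),
			Tag:  `yaml:"example_sd_configs,omitempty"`,
		},
	})
	v := reflect.New(typ).Elem()
	v.Field(0).SetString("node")
	fmt.Println(typ, v.Interface())
}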
- } - outVal.Field(i).Set(cfgVal.Field(i)) - } - - var err error - *configs, err = readConfigs(cfgVal, outVal.NumField()) - return err -} - -func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) { - var ( - configs discovery.Configs - targets []*targetgroup.Group - ) - for i, n := startField, structVal.NumField(); i < n; i++ { - field := structVal.Field(i) - if field.Kind() != reflect.Slice { - panic("discovery: internal error: field is not a slice") - } - for k := 0; k < field.Len(); k++ { - val := field.Index(k) - if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) { - key := configFieldNames[field.Type().Elem()] - key = strings.TrimPrefix(key, configFieldPrefix) - return nil, fmt.Errorf("empty or null section in %s", key) - } - switch c := val.Interface().(type) { - case *targetgroup.Group: - // Add index to the static config target groups for unique identification - // within scrape pool. - c.Source = strconv.Itoa(len(targets)) - // Coalesce multiple static configs into a single static config. - targets = append(targets, c) - case discovery.Config: - configs = append(configs, c) - default: - panic("discovery: internal error: slice element is not a Config") - } - } - } - if len(targets) > 0 { - configs = append(configs, discovery.StaticConfig(targets)) - } - return configs, nil -} - -// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs -// that have a Configs field that should be inlined. -func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) { - inVal := reflect.ValueOf(in) - for inVal.Kind() == reflect.Ptr { - inVal = inVal.Elem() - } - inTyp := inVal.Type() - - cfgTyp := getConfigType(inTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields to dynamic value. - var configs *discovery.Configs - for i, n := 0, inTyp.NumField(); i < n; i++ { - if inTyp.Field(i).Type == configsType { - configs = inVal.Field(i).Addr().Interface().(*discovery.Configs) - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - cfgVal.Field(i).Set(inVal.Field(i)) - } - if configs == nil { - return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in) - } - - if err := writeConfigs(cfgVal, *configs); err != nil { - return nil, err - } - - return cfgPtr.Interface(), nil -} - -func writeConfigs(structVal reflect.Value, configs discovery.Configs) error { - targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group) - for _, c := range configs { - if sc, ok := c.(discovery.StaticConfig); ok { - *targets = append(*targets, sc...) 
- continue - } - fieldName, ok := configFieldNames[reflect.TypeOf(c)] - if !ok { - return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c) - } - field := structVal.FieldByName(fieldName) - field.Set(reflect.Append(field, reflect.ValueOf(c))) - } - return nil -} - -func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { - var e *yaml.TypeError - if errors.As(err, &e) { - oldStr := oldTyp.String() - newStr := newTyp.String() - for i, s := range e.Errors { - e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) - } - } - return err -} diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 63213c87b2..634a6b1d4b 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -51,6 +51,7 @@ const ( linodeLabelStatus = linodeLabel + "status" linodeLabelTags = linodeLabel + "tags" linodeLabelGroup = linodeLabel + "group" + linodeLabelGPUs = linodeLabel + "gpus" linodeLabelHypervisor = linodeLabel + "hypervisor" linodeLabelBackups = linodeLabel + "backups" linodeLabelSpecsDiskBytes = linodeLabel + "specs_disk_bytes" @@ -58,32 +59,28 @@ const ( linodeLabelSpecsVCPUs = linodeLabel + "specs_vcpus" linodeLabelSpecsTransferBytes = linodeLabel + "specs_transfer_bytes" linodeLabelExtraIPs = linodeLabel + "extra_ips" + linodeLabelIPv6Ranges = linodeLabel + "ipv6_ranges" // This is our events filter; when polling for changes, we care only about // events since our last refresh. - // Docs: https://www.linode.com/docs/api/account/#events-list + // Docs: https://www.linode.com/docs/api/account/#events-list. filterTemplate = `{"created": {"+gte": "%s"}}` + + // Optional region filtering. + regionFilterTemplate = `{"region": "%s"}` ) // DefaultSDConfig is the default Linode SD configuration. -var ( - DefaultSDConfig = SDConfig{ - TagSeparator: ",", - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - HTTPClientConfig: config.DefaultHTTPClientConfig, - } - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_linode_failures_total", - Help: "Number of Linode service discovery refresh failures.", - }) -) +var DefaultSDConfig = SDConfig{ + TagSeparator: ",", + Port: 80, + Region: "", + RefreshInterval: model.Duration(60 * time.Second), + HTTPClientConfig: config.DefaultHTTPClientConfig, +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) } // SDConfig is the configuration for Linode based service discovery. @@ -93,6 +90,12 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` TagSeparator string `yaml:"tag_separator,omitempty"` + Region string `yaml:"region,omitempty"` +} + +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) } // Name returns the name of the Config. @@ -100,7 +103,7 @@ func (*SDConfig) Name() string { return "linode" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. 
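// The two filter templates above expand to the JSON filter expressions the
// Linode list APIs accept: one restricts events to those created since the
// last refresh, the other scopes listings to a region. For example:

package main

import (
	"fmt"
	"time"
)

func main() {
	filterTemplate := `{"created": {"+gte": "%s"}}`
	regionFilterTemplate := `{"region": "%s"}`

	last := time.Date(2024, 1, 2, 15, 4, 5, 0, time.UTC)
	fmt.Println(fmt.Sprintf(filterTemplate, last.Format("2006-01-02T15:04:05")))
	// {"created": {"+gte": "2024-01-02T15:04:05"}}
	fmt.Println(fmt.Sprintf(regionFilterTemplate, "us-east"))
	// {"region": "us-east"}
}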
@@ -125,21 +128,30 @@ type Discovery struct { *refresh.Discovery client *linodego.Client port int + region string tagSeparator string lastRefreshTimestamp time.Time pollCount int lastResults []*targetgroup.Group eventPollingEnabled bool + metrics *linodeMetrics } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*linodeMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + d := &Discovery{ port: conf.Port, + region: conf.Region, tagSeparator: conf.TagSeparator, pollCount: 0, lastRefreshTimestamp: time.Now().UTC(), eventPollingEnabled: true, + metrics: m, } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd") @@ -157,10 +169,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { d.client = &client d.Discovery = refresh.NewDiscovery( - logger, - "linode", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "linode", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -171,12 +186,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { if d.lastResults != nil && d.eventPollingEnabled { // Check to see if there have been any events. If so, refresh our data. - opts := linodego.ListOptions{ + eventsOpts := linodego.ListOptions{ PageOptions: &linodego.PageOptions{Page: 1}, PageSize: 25, Filter: fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")), } - events, err := d.client.ListEvents(ctx, &opts) + events, err := d.client.ListEvents(ctx, &eventsOpts) if err != nil { var e *linodego.Error if errors.As(err, &e) && e.Code == http.StatusUnauthorized { @@ -217,18 +232,42 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro tg := &targetgroup.Group{ Source: "Linode", } + // We need 3 of these because Linodego writes into the structure during pagination + listInstancesOpts := linodego.ListOptions{ + PageSize: 500, + } + listIPAddressesOpts := linodego.ListOptions{ + PageSize: 500, + } + listIPv6RangesOpts := linodego.ListOptions{ + PageSize: 500, + } + + // If region filter provided, use it to constrain results. + if d.region != "" { + listInstancesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) + listIPAddressesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) + listIPv6RangesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) + } // Gather all linode instances. - instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{PageSize: 500}) + instances, err := d.client.ListInstances(ctx, &listInstancesOpts) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, err } // Gather detailed IP address info for all IPs on all linode instances. - detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{PageSize: 500}) + detailedIPs, err := d.client.ListIPAddresses(ctx, &listIPAddressesOpts) + if err != nil { + d.metrics.failuresCount.Inc() + return nil, err + } + + // Gather detailed IPv6 Range info for all linode instances. 
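// The "We need 3 of these" comment above is load-bearing: linodego advances
// pagination state inside the ListOptions value it is handed, so sharing one
// value across the three list calls would let state leak between them. A
// self-contained sketch of the hazard with a stand-in type (linodego internals
// simplified here):

package main

import "fmt"

// ListOptions stands in for linodego.ListOptions; the real client mutates the
// value it is given while walking pages.
type ListOptions struct {
	Page   int
	Filter string
}

// listAll mimics a paginated client call: it advances opts.Page in place.
func listAll(opts *ListOptions, pages int) {
	for opts.Page = 1; opts.Page <= pages; opts.Page++ {
		// fetch page opts.Page using opts.Filter ...
	}
}

func main() {
	shared := ListOptions{Filter: `{"region": "us-east"}`}
	listAll(&shared, 3)
	fmt.Printf("%+v\n", shared) // {Page:4 ...}: a reused value starts dirty.
}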
+ ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &listIPv6RangesOpts) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, err } @@ -241,7 +280,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro privateIPv4, publicIPv4, publicIPv6 string privateIPv4RDNS, publicIPv4RDNS, publicIPv6RDNS string backupsStatus string - extraIPs []string + extraIPs, ipv6Ranges []string ) for _, ip := range instance.IPv4 { @@ -269,17 +308,23 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro } if instance.IPv6 != "" { + slaac := strings.Split(instance.IPv6, "/")[0] for _, detailedIP := range detailedIPs { - if detailedIP.Address != strings.Split(instance.IPv6, "/")[0] { + if detailedIP.Address != slaac { continue } - publicIPv6 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv6RDNS = detailedIP.RDNS } } + for _, ipv6Range := range ipv6RangeList { + if ipv6Range.RouteTarget != slaac { + continue + } + ipv6Ranges = append(ipv6Ranges, fmt.Sprintf("%s/%d", ipv6Range.Range, ipv6Range.Prefix)) + } } if instance.Backups.Enabled { @@ -289,7 +334,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro } labels := model.LabelSet{ - linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)), + linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)), linodeLabelName: model.LabelValue(instance.Label), linodeLabelImage: model.LabelValue(instance.Image), linodeLabelPrivateIPv4: model.LabelValue(privateIPv4), @@ -302,12 +347,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro linodeLabelType: model.LabelValue(instance.Type), linodeLabelStatus: model.LabelValue(instance.Status), linodeLabelGroup: model.LabelValue(instance.Group), + linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), - linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)), - linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)), - linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)), - linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)), + linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)), + linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)), + linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)), + linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) @@ -322,12 +368,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro if len(extraIPs) > 0 { // This instance has more than one of at least one type of IP address (public, private, - // IPv4, IPv6, etc. We provide those extra IPs found here just like we do for instance + // IPv4,etc. We provide those extra IPs found here just like we do for instance // tags, we surround a separated list with the tagSeparator config. 
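// Surrounding the joined list with the separator (",a,b," rather than "a,b"),
// as the comment above describes for tags and extra IPs, lets a relabel regex
// match any single element as ,<value>, with no special case for the first or
// last position. An illustration with the default separator:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	sep := ","
	extraIPs := []string{"96.126.108.16", "192.168.201.25"}
	label := sep + strings.Join(extraIPs, sep) + sep // ",96.126.108.16,192.168.201.25,"

	// Anchored element match, mirroring how relabel rules target such meta labels.
	re := regexp.MustCompile(`.*,96\.126\.108\.16,.*`)
	fmt.Println(label, re.MatchString(label)) // true
}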
ips := d.tagSeparator + strings.Join(extraIPs, d.tagSeparator) + d.tagSeparator labels[linodeLabelExtraIPs] = model.LabelValue(ips) } + if len(ipv6Ranges) > 0 { + // This instance has more than one IPv6 Ranges routed to it we provide these + // Ranges found here just like we do for instance tags, we surround a separated + // list with the tagSeparator config. + ips := d.tagSeparator + strings.Join(ipv6Ranges, d.tagSeparator) + d.tagSeparator + labels[linodeLabelIPv6Ranges] = model.LabelValue(ips) + } + tg.Targets = append(tg.Targets, labels) } return []*targetgroup.Group{tg}, nil diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go index 67eb8198e8..3c10650653 100644 --- a/discovery/linode/linode_test.go +++ b/discovery/linode/linode_test.go @@ -20,152 +20,244 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" -) -type LinodeSDTestSuite struct { - Mock *SDMock -} + "github.com/prometheus/prometheus/discovery" +) -func (s *LinodeSDTestSuite) TearDownSuite() { - s.Mock.ShutdownServer() -} +func TestLinodeSDRefresh(t *testing.T) { + sdmock := NewSDMock(t) + sdmock.Setup() -func (s *LinodeSDTestSuite) SetupTest(t *testing.T) { - s.Mock = NewSDMock(t) - s.Mock.Setup() + tests := map[string]struct { + region string + targetCount int + want []model.LabelSet + }{ + "no_region": {region: "", targetCount: 4, want: []model.LabelSet{ + { + "__address__": model.LabelValue("45.33.82.151:80"), + "__meta_linode_instance_id": model.LabelValue("26838044"), + "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"), + "__meta_linode_image": model.LabelValue("linode/arch"), + "__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"), + "__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"), + "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"), + "__meta_linode_private_ipv4_rdns": model.LabelValue(""), + "__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"), + "__meta_linode_public_ipv6_rdns": model.LabelValue(""), + "__meta_linode_region": model.LabelValue("us-east"), + "__meta_linode_type": model.LabelValue("g6-standard-2"), + "__meta_linode_status": model.LabelValue("running"), + "__meta_linode_tags": model.LabelValue(",monitoring,"), + "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), + "__meta_linode_hypervisor": model.LabelValue("kvm"), + "__meta_linode_backups": model.LabelValue("disabled"), + "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), + "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), + "__meta_linode_specs_vcpus": model.LabelValue("2"), + "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), + "__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"), + }, + { + "__address__": model.LabelValue("139.162.196.43:80"), + "__meta_linode_instance_id": model.LabelValue("26848419"), + "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-2"), + "__meta_linode_image": model.LabelValue("linode/debian10"), + "__meta_linode_private_ipv4": model.LabelValue(""), + "__meta_linode_public_ipv4": model.LabelValue("139.162.196.43"), + "__meta_linode_public_ipv6": model.LabelValue("2a01:7e00::f03c:92ff:fe1a:9976"), + "__meta_linode_private_ipv4_rdns": model.LabelValue(""), + 
"__meta_linode_public_ipv4_rdns": model.LabelValue("li1359-43.members.linode.com"), + "__meta_linode_public_ipv6_rdns": model.LabelValue(""), + "__meta_linode_region": model.LabelValue("eu-west"), + "__meta_linode_type": model.LabelValue("g6-standard-2"), + "__meta_linode_status": model.LabelValue("running"), + "__meta_linode_tags": model.LabelValue(",monitoring,"), + "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), + "__meta_linode_hypervisor": model.LabelValue("kvm"), + "__meta_linode_backups": model.LabelValue("disabled"), + "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), + "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), + "__meta_linode_specs_vcpus": model.LabelValue("2"), + "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), + }, + { + "__address__": model.LabelValue("192.53.120.25:80"), + "__meta_linode_instance_id": model.LabelValue("26837938"), + "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"), + "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), + "__meta_linode_private_ipv4": model.LabelValue(""), + "__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"), + "__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"), + "__meta_linode_private_ipv4_rdns": model.LabelValue(""), + "__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"), + "__meta_linode_public_ipv6_rdns": model.LabelValue(""), + "__meta_linode_region": model.LabelValue("ca-central"), + "__meta_linode_type": model.LabelValue("g6-standard-1"), + "__meta_linode_status": model.LabelValue("running"), + "__meta_linode_tags": model.LabelValue(",monitoring,"), + "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), + "__meta_linode_hypervisor": model.LabelValue("kvm"), + "__meta_linode_backups": model.LabelValue("disabled"), + "__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"), + "__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"), + "__meta_linode_specs_vcpus": model.LabelValue("1"), + "__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"), + "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c04:e001:456::/64,"), + }, + { + "__address__": model.LabelValue("66.228.47.103:80"), + "__meta_linode_instance_id": model.LabelValue("26837992"), + "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"), + "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), + "__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"), + "__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"), + "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"), + "__meta_linode_private_ipv4_rdns": model.LabelValue(""), + "__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"), + "__meta_linode_public_ipv6_rdns": model.LabelValue(""), + "__meta_linode_region": model.LabelValue("us-east"), + "__meta_linode_type": model.LabelValue("g6-nanode-1"), + "__meta_linode_status": model.LabelValue("running"), + "__meta_linode_tags": model.LabelValue(",monitoring,"), + "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), + "__meta_linode_hypervisor": model.LabelValue("kvm"), + "__meta_linode_backups": model.LabelValue("disabled"), + "__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"), + "__meta_linode_specs_memory_bytes": 
model.LabelValue("1073741824"), + "__meta_linode_specs_vcpus": model.LabelValue("1"), + "__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"), + "__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"), + "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c03:e000:123::/64,"), + }, + }}, + "us-east": {region: "us-east", targetCount: 2, want: []model.LabelSet{ + { + "__address__": model.LabelValue("45.33.82.151:80"), + "__meta_linode_instance_id": model.LabelValue("26838044"), + "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"), + "__meta_linode_image": model.LabelValue("linode/arch"), + "__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"), + "__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"), + "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"), + "__meta_linode_private_ipv4_rdns": model.LabelValue(""), + "__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"), + "__meta_linode_public_ipv6_rdns": model.LabelValue(""), + "__meta_linode_region": model.LabelValue("us-east"), + "__meta_linode_type": model.LabelValue("g6-standard-2"), + "__meta_linode_status": model.LabelValue("running"), + "__meta_linode_tags": model.LabelValue(",monitoring,"), + "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), + "__meta_linode_hypervisor": model.LabelValue("kvm"), + "__meta_linode_backups": model.LabelValue("disabled"), + "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), + "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), + "__meta_linode_specs_vcpus": model.LabelValue("2"), + "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), + "__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"), + }, + { + "__address__": model.LabelValue("66.228.47.103:80"), + "__meta_linode_instance_id": model.LabelValue("26837992"), + "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"), + "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), + "__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"), + "__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"), + "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"), + "__meta_linode_private_ipv4_rdns": model.LabelValue(""), + "__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"), + "__meta_linode_public_ipv6_rdns": model.LabelValue(""), + "__meta_linode_region": model.LabelValue("us-east"), + "__meta_linode_type": model.LabelValue("g6-nanode-1"), + "__meta_linode_status": model.LabelValue("running"), + "__meta_linode_tags": model.LabelValue(",monitoring,"), + "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), + "__meta_linode_hypervisor": model.LabelValue("kvm"), + "__meta_linode_backups": model.LabelValue("disabled"), + "__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"), + "__meta_linode_specs_memory_bytes": model.LabelValue("1073741824"), + "__meta_linode_specs_vcpus": model.LabelValue("1"), + "__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"), + "__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"), + "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c03:e000:123::/64,"), + }, + }}, + "us-central": {region: "ca-central", targetCount: 1, want: []model.LabelSet{ + { + "__address__": model.LabelValue("192.53.120.25:80"), + 
"__meta_linode_instance_id": model.LabelValue("26837938"), + "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"), + "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), + "__meta_linode_private_ipv4": model.LabelValue(""), + "__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"), + "__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"), + "__meta_linode_private_ipv4_rdns": model.LabelValue(""), + "__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"), + "__meta_linode_public_ipv6_rdns": model.LabelValue(""), + "__meta_linode_region": model.LabelValue("ca-central"), + "__meta_linode_type": model.LabelValue("g6-standard-1"), + "__meta_linode_status": model.LabelValue("running"), + "__meta_linode_tags": model.LabelValue(",monitoring,"), + "__meta_linode_group": model.LabelValue(""), + "__meta_linode_gpus": model.LabelValue("0"), + "__meta_linode_hypervisor": model.LabelValue("kvm"), + "__meta_linode_backups": model.LabelValue("disabled"), + "__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"), + "__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"), + "__meta_linode_specs_vcpus": model.LabelValue("1"), + "__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"), + "__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c04:e001:456::/64,"), + }, + }}, + } - s.Mock.HandleLinodeInstancesList() - s.Mock.HandleLinodeNeworkingIPs() - s.Mock.HandleLinodeAccountEvents() -} + for _, tc := range tests { + cfg := DefaultSDConfig + if tc.region != "" { + cfg.Region = tc.region + } + cfg.HTTPClientConfig.Authorization = &config.Authorization{ + Credentials: tokenID, + Type: "Bearer", + } -func TestLinodeSDRefresh(t *testing.T) { - sdmock := &LinodeSDTestSuite{} - sdmock.SetupTest(t) - t.Cleanup(sdmock.TearDownSuite) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() - cfg := DefaultSDConfig - cfg.HTTPClientConfig.Authorization = &config.Authorization{ - Credentials: tokenID, - Type: "Bearer", - } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) - require.NoError(t, err) - endpoint, err := url.Parse(sdmock.Mock.Endpoint()) - require.NoError(t, err) - d.client.SetBaseURL(endpoint.String()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + require.NoError(t, err) + endpoint, err := url.Parse(sdmock.Endpoint()) + require.NoError(t, err) + d.client.SetBaseURL(endpoint.String()) - tgs, err := d.refresh(context.Background()) - require.NoError(t, err) + tgs, err := d.refresh(context.Background()) + require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) - tg := tgs[0] - require.NotNil(t, tg) - require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + tg := tgs[0] + require.NotNil(t, tg) + require.NotNil(t, tg.Targets) + require.Len(t, tg.Targets, tc.targetCount) - for i, lbls := range []model.LabelSet{ - { - "__address__": model.LabelValue("45.33.82.151:80"), - "__meta_linode_instance_id": model.LabelValue("26838044"), - "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"), - "__meta_linode_image": model.LabelValue("linode/arch"), - "__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"), - "__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"), - "__meta_linode_public_ipv6": 
model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"), - "__meta_linode_private_ipv4_rdns": model.LabelValue(""), - "__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"), - "__meta_linode_public_ipv6_rdns": model.LabelValue(""), - "__meta_linode_region": model.LabelValue("us-east"), - "__meta_linode_type": model.LabelValue("g6-standard-2"), - "__meta_linode_status": model.LabelValue("running"), - "__meta_linode_tags": model.LabelValue(",monitoring,"), - "__meta_linode_group": model.LabelValue(""), - "__meta_linode_hypervisor": model.LabelValue("kvm"), - "__meta_linode_backups": model.LabelValue("disabled"), - "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), - "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), - "__meta_linode_specs_vcpus": model.LabelValue("2"), - "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), - "__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"), - }, - { - "__address__": model.LabelValue("139.162.196.43:80"), - "__meta_linode_instance_id": model.LabelValue("26848419"), - "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-2"), - "__meta_linode_image": model.LabelValue("linode/debian10"), - "__meta_linode_private_ipv4": model.LabelValue(""), - "__meta_linode_public_ipv4": model.LabelValue("139.162.196.43"), - "__meta_linode_public_ipv6": model.LabelValue("2a01:7e00::f03c:92ff:fe1a:9976"), - "__meta_linode_private_ipv4_rdns": model.LabelValue(""), - "__meta_linode_public_ipv4_rdns": model.LabelValue("li1359-43.members.linode.com"), - "__meta_linode_public_ipv6_rdns": model.LabelValue(""), - "__meta_linode_region": model.LabelValue("eu-west"), - "__meta_linode_type": model.LabelValue("g6-standard-2"), - "__meta_linode_status": model.LabelValue("running"), - "__meta_linode_tags": model.LabelValue(",monitoring,"), - "__meta_linode_group": model.LabelValue(""), - "__meta_linode_hypervisor": model.LabelValue("kvm"), - "__meta_linode_backups": model.LabelValue("disabled"), - "__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"), - "__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"), - "__meta_linode_specs_vcpus": model.LabelValue("2"), - "__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"), - }, - { - "__address__": model.LabelValue("192.53.120.25:80"), - "__meta_linode_instance_id": model.LabelValue("26837938"), - "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"), - "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), - "__meta_linode_private_ipv4": model.LabelValue(""), - "__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"), - "__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"), - "__meta_linode_private_ipv4_rdns": model.LabelValue(""), - "__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"), - "__meta_linode_public_ipv6_rdns": model.LabelValue(""), - "__meta_linode_region": model.LabelValue("ca-central"), - "__meta_linode_type": model.LabelValue("g6-standard-1"), - "__meta_linode_status": model.LabelValue("running"), - "__meta_linode_tags": model.LabelValue(",monitoring,"), - "__meta_linode_group": model.LabelValue(""), - "__meta_linode_hypervisor": model.LabelValue("kvm"), - "__meta_linode_backups": model.LabelValue("disabled"), - "__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"), - "__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"), - 
"__meta_linode_specs_vcpus": model.LabelValue("1"), - "__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"), - }, - { - "__address__": model.LabelValue("66.228.47.103:80"), - "__meta_linode_instance_id": model.LabelValue("26837992"), - "__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"), - "__meta_linode_image": model.LabelValue("linode/ubuntu20.04"), - "__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"), - "__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"), - "__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"), - "__meta_linode_private_ipv4_rdns": model.LabelValue(""), - "__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"), - "__meta_linode_public_ipv6_rdns": model.LabelValue(""), - "__meta_linode_region": model.LabelValue("us-east"), - "__meta_linode_type": model.LabelValue("g6-nanode-1"), - "__meta_linode_status": model.LabelValue("running"), - "__meta_linode_tags": model.LabelValue(",monitoring,"), - "__meta_linode_group": model.LabelValue(""), - "__meta_linode_hypervisor": model.LabelValue("kvm"), - "__meta_linode_backups": model.LabelValue("disabled"), - "__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"), - "__meta_linode_specs_memory_bytes": model.LabelValue("1073741824"), - "__meta_linode_specs_vcpus": model.LabelValue("1"), - "__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"), - "__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"), - }, - } { - t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { - require.Equal(t, lbls, tg.Targets[i]) - }) + for i, lbls := range tc.want { + t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { + require.Equal(t, lbls, tg.Targets[i]) + }) + } } } diff --git a/discovery/linode/metrics.go b/discovery/linode/metrics.go new file mode 100644 index 0000000000..8f81389226 --- /dev/null +++ b/discovery/linode/metrics.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linode + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*linodeMetrics)(nil) + +type linodeMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator + + failuresCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &linodeMetrics{ + refreshMetrics: rmi, + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_linode_failures_total", + Help: "Number of Linode service discovery refresh failures.", + }), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.failuresCount, + }) + + return m +} + +// Register implements discovery.DiscovererMetrics. 
+func (m *linodeMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *linodeMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/linode/mock_test.go b/discovery/linode/mock_test.go index ade726ed71..50f0572ecd 100644 --- a/discovery/linode/mock_test.go +++ b/discovery/linode/mock_test.go @@ -14,13 +14,18 @@ package linode import ( + "encoding/json" "fmt" "net/http" "net/http/httptest" + "os" + "path/filepath" "testing" ) -// SDMock is the interface for the Linode mock +const tokenID = "7b2c56dd51edd90952c1b94c472b94b176f20c5c777e376849edd8ad1c6c03bb" + +// SDMock is the interface for the Linode mock. type SDMock struct { t *testing.T Server *httptest.Server @@ -34,421 +39,43 @@ func NewSDMock(t *testing.T) *SDMock { } } -// Endpoint returns the URI to the mock server +// Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } -// Setup creates the mock server +// Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) + m.t.Cleanup(m.Server.Close) + m.SetupHandlers() } -// ShutdownServer creates the mock server -func (m *SDMock) ShutdownServer() { - m.Server.Close() -} - -const tokenID = "7b2c56dd51edd90952c1b94c472b94b176f20c5c777e376849edd8ad1c6c03bb" - -// HandleLinodeInstancesList mocks linode instances list. -func (m *SDMock) HandleLinodeInstancesList() { - m.Mux.HandleFunc("/v4/linode/instances", func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { - w.WriteHeader(http.StatusUnauthorized) - return - } - - w.Header().Set("content-type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusOK) - - fmt.Fprint(w, ` -{ - "data": [ - { - "id": 26838044, - "label": "prometheus-linode-sd-exporter-1", - "group": "", - "status": "running", - "created": "2021-05-12T04:23:44", - "updated": "2021-05-12T04:23:44", - "type": "g6-standard-2", - "ipv4": [ - "45.33.82.151", - "96.126.108.16", - "192.168.170.51", - "192.168.201.25" - ], - "ipv6": "2600:3c03::f03c:92ff:fe1a:1382/128", - "image": "linode/arch", - "region": "us-east", - "specs": { - "disk": 81920, - "memory": 4096, - "vcpus": 2, - "gpus": 0, - "transfer": 4000 - }, - "alerts": { - "cpu": 180, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80, - "io": 10000 - }, - "backups": { - "enabled": false, - "schedule": { - "day": null, - "window": null - }, - "last_successful": null - }, - "hypervisor": "kvm", - "watchdog_enabled": true, - "tags": [ - "monitoring" - ] - }, - { - "id": 26848419, - "label": "prometheus-linode-sd-exporter-2", - "group": "", - "status": "running", - "created": "2021-05-12T12:41:49", - "updated": "2021-05-12T12:41:49", - "type": "g6-standard-2", - "ipv4": [ - "139.162.196.43" - ], - "ipv6": "2a01:7e00::f03c:92ff:fe1a:9976/128", - "image": "linode/debian10", - "region": "eu-west", - "specs": { - "disk": 81920, - "memory": 4096, - "vcpus": 2, - "gpus": 0, - "transfer": 4000 - }, - "alerts": { - "cpu": 180, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80, - "io": 10000 - }, - "backups": { - "enabled": false, - "schedule": { - "day": null, - "window": null - }, - "last_successful": null - }, - "hypervisor": "kvm", - "watchdog_enabled": true, - "tags": [ - "monitoring" - ] - }, - { - "id": 26837938, - "label": "prometheus-linode-sd-exporter-3", - "group": "", - "status": 
"running", - "created": "2021-05-12T04:20:11", - "updated": "2021-05-12T04:20:11", - "type": "g6-standard-1", - "ipv4": [ - "192.53.120.25" - ], - "ipv6": "2600:3c04::f03c:92ff:fe1a:fb68/128", - "image": "linode/ubuntu20.04", - "region": "ca-central", - "specs": { - "disk": 51200, - "memory": 2048, - "vcpus": 1, - "gpus": 0, - "transfer": 2000 - }, - "alerts": { - "cpu": 90, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80, - "io": 10000 - }, - "backups": { - "enabled": false, - "schedule": { - "day": null, - "window": null - }, - "last_successful": null - }, - "hypervisor": "kvm", - "watchdog_enabled": true, - "tags": [ - "monitoring" - ] - }, - { - "id": 26837992, - "label": "prometheus-linode-sd-exporter-4", - "group": "", - "status": "running", - "created": "2021-05-12T04:22:06", - "updated": "2021-05-12T04:22:06", - "type": "g6-nanode-1", - "ipv4": [ - "66.228.47.103", - "172.104.18.104", - "192.168.148.94" - ], - "ipv6": "2600:3c03::f03c:92ff:fe1a:fb4c/128", - "image": "linode/ubuntu20.04", - "region": "us-east", - "specs": { - "disk": 25600, - "memory": 1024, - "vcpus": 1, - "gpus": 0, - "transfer": 1000 - }, - "alerts": { - "cpu": 90, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80, - "io": 10000 - }, - "backups": { - "enabled": false, - "schedule": { - "day": null, - "window": null - }, - "last_successful": null - }, - "hypervisor": "kvm", - "watchdog_enabled": true, - "tags": [ - "monitoring" - ] - } - ], - "page": 1, - "pages": 1, - "results": 4 -}`, - ) - }) -} - -// HandleLinodeNeworkingIPs mocks linode networking ips endpoint. -func (m *SDMock) HandleLinodeNeworkingIPs() { - m.Mux.HandleFunc("/v4/networking/ips", func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { - w.WriteHeader(http.StatusUnauthorized) - return - } - - w.Header().Set("content-type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusOK) - - fmt.Fprint(w, ` -{ - "page": 1, - "pages": 1, - "results": 13, - "data": [ - { - "address": "192.53.120.25", - "gateway": "192.53.120.1", - "subnet_mask": "255.255.255.0", - "prefix": 24, - "type": "ipv4", - "public": true, - "rdns": "li2216-25.members.linode.com", - "linode_id": 26837938, - "region": "ca-central" - }, - { - "address": "66.228.47.103", - "gateway": "66.228.47.1", - "subnet_mask": "255.255.255.0", - "prefix": 24, - "type": "ipv4", - "public": true, - "rdns": "li328-103.members.linode.com", - "linode_id": 26837992, - "region": "us-east" - }, - { - "address": "172.104.18.104", - "gateway": "172.104.18.1", - "subnet_mask": "255.255.255.0", - "prefix": 24, - "type": "ipv4", - "public": true, - "rdns": "li1832-104.members.linode.com", - "linode_id": 26837992, - "region": "us-east" - }, - { - "address": "192.168.148.94", - "gateway": null, - "subnet_mask": "255.255.128.0", - "prefix": 17, - "type": "ipv4", - "public": false, - "rdns": null, - "linode_id": 26837992, - "region": "us-east" - }, - { - "address": "192.168.170.51", - "gateway": null, - "subnet_mask": "255.255.128.0", - "prefix": 17, - "type": "ipv4", - "public": false, - "rdns": null, - "linode_id": 26838044, - "region": "us-east" - }, - { - "address": "96.126.108.16", - "gateway": "96.126.108.1", - "subnet_mask": "255.255.255.0", - "prefix": 24, - "type": "ipv4", - "public": true, - "rdns": "li365-16.members.linode.com", - "linode_id": 26838044, - "region": "us-east" - }, - { - "address": "45.33.82.151", - "gateway": "45.33.82.1", - "subnet_mask": "255.255.255.0", - "prefix": 24, - "type": 
"ipv4", - "public": true, - "rdns": "li1028-151.members.linode.com", - "linode_id": 26838044, - "region": "us-east" - }, - { - "address": "192.168.201.25", - "gateway": null, - "subnet_mask": "255.255.128.0", - "prefix": 17, - "type": "ipv4", - "public": false, - "rdns": null, - "linode_id": 26838044, - "region": "us-east" - }, - { - "address": "139.162.196.43", - "gateway": "139.162.196.1", - "subnet_mask": "255.255.255.0", - "prefix": 24, - "type": "ipv4", - "public": true, - "rdns": "li1359-43.members.linode.com", - "linode_id": 26848419, - "region": "eu-west" - }, - { - "address": "2600:3c04::f03c:92ff:fe1a:fb68", - "gateway": "fe80::1", - "subnet_mask": "ffff:ffff:ffff:ffff::", - "prefix": 64, - "type": "ipv6", - "rdns": null, - "linode_id": 26837938, - "region": "ca-central", - "public": true - }, - { - "address": "2600:3c03::f03c:92ff:fe1a:fb4c", - "gateway": "fe80::1", - "subnet_mask": "ffff:ffff:ffff:ffff::", - "prefix": 64, - "type": "ipv6", - "rdns": null, - "linode_id": 26837992, - "region": "us-east", - "public": true - }, - { - "address": "2600:3c03::f03c:92ff:fe1a:1382", - "gateway": "fe80::1", - "subnet_mask": "ffff:ffff:ffff:ffff::", - "prefix": 64, - "type": "ipv6", - "rdns": null, - "linode_id": 26838044, - "region": "us-east", - "public": true - }, - { - "address": "2a01:7e00::f03c:92ff:fe1a:9976", - "gateway": "fe80::1", - "subnet_mask": "ffff:ffff:ffff:ffff::", - "prefix": 64, - "type": "ipv6", - "rdns": null, - "linode_id": 26848419, - "region": "eu-west", - "public": true - } - ] -}`, - ) - }) -} - -// HandleLinodeAccountEvents mocks linode the account/events endpoint. -func (m *SDMock) HandleLinodeAccountEvents() { - m.Mux.HandleFunc("/v4/account/events", func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { - w.WriteHeader(http.StatusUnauthorized) - return - } - - if r.Header.Get("X-Filter") == "" { - // This should never happen; if the client sends an events request without - // a filter, cause it to fail. The error below is not a real response from - // the API, but should aid in debugging failed tests. - w.WriteHeader(http.StatusBadRequest) - fmt.Fprint(w, ` -{ - "errors": [ - { - "reason": "Request missing expected X-Filter headers" - } - ] -}`, - ) - return - } - - w.Header().Set("content-type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusOK) - - fmt.Fprint(w, ` -{ - "data": [], - "results": 0, - "pages": 1, - "page": 1 -}`, - ) - }) +// SetupHandlers for endpoints of interest. +func (m *SDMock) SetupHandlers() { + for _, handler := range []string{"/v4/account/events", "/v4/linode/instances", "/v4/networking/ips", "/v4/networking/ipv6/ranges"} { + m.Mux.HandleFunc(handler, func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { + w.WriteHeader(http.StatusUnauthorized) + return + } + xFilter := struct { + Region string `json:"region"` + }{} + json.Unmarshal([]byte(r.Header.Get("X-Filter")), &xFilter) + + directory := "testdata/no_region_filter" + if xFilter.Region != "" { // Validate region filter matches test criteria. 
+ directory = "testdata/" + xFilter.Region + } + if response, err := os.ReadFile(filepath.Join(directory, r.URL.Path+".json")); err == nil { + w.Header().Add("content-type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusOK) + w.Write(response) + return + } + w.WriteHeader(http.StatusInternalServerError) + }) + } } diff --git a/discovery/linode/testdata/ca-central/v4/account/events.json b/discovery/linode/testdata/ca-central/v4/account/events.json new file mode 100644 index 0000000000..ca302e4fd0 --- /dev/null +++ b/discovery/linode/testdata/ca-central/v4/account/events.json @@ -0,0 +1,6 @@ +{ + "data": [], + "results": 0, + "pages": 1, + "page": 1 +} diff --git a/discovery/linode/testdata/ca-central/v4/linode/instances.json b/discovery/linode/testdata/ca-central/v4/linode/instances.json new file mode 100644 index 0000000000..dfc1172477 --- /dev/null +++ b/discovery/linode/testdata/ca-central/v4/linode/instances.json @@ -0,0 +1,49 @@ +{ + "data": [ + { + "id": 26837938, + "label": "prometheus-linode-sd-exporter-3", + "group": "", + "status": "running", + "created": "2021-05-12T04:20:11", + "updated": "2021-05-12T04:20:11", + "type": "g6-standard-1", + "ipv4": [ + "192.53.120.25" + ], + "ipv6": "2600:3c04::f03c:92ff:fe1a:fb68/128", + "image": "linode/ubuntu20.04", + "region": "ca-central", + "specs": { + "disk": 51200, + "memory": 2048, + "vcpus": 1, + "gpus": 0, + "transfer": 2000 + }, + "alerts": { + "cpu": 90, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80, + "io": 10000 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + }, + "last_successful": null + }, + "hypervisor": "kvm", + "watchdog_enabled": true, + "tags": [ + "monitoring" + ] + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/discovery/linode/testdata/ca-central/v4/networking/ips.json b/discovery/linode/testdata/ca-central/v4/networking/ips.json new file mode 100644 index 0000000000..23d974a886 --- /dev/null +++ b/discovery/linode/testdata/ca-central/v4/networking/ips.json @@ -0,0 +1,29 @@ +{ + "page": 1, + "pages": 1, + "results": 2, + "data": [ + { + "address": "192.53.120.25", + "gateway": "192.53.120.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li2216-25.members.linode.com", + "linode_id": 26837938, + "region": "ca-central" + }, + { + "address": "2600:3c04::f03c:92ff:fe1a:fb68", + "gateway": "fe80::1", + "subnet_mask": "ffff:ffff:ffff:ffff::", + "prefix": 64, + "type": "ipv6", + "rdns": null, + "linode_id": 26837938, + "region": "ca-central", + "public": true + } + ] +} diff --git a/discovery/linode/testdata/ca-central/v4/networking/ipv6/ranges.json b/discovery/linode/testdata/ca-central/v4/networking/ipv6/ranges.json new file mode 100644 index 0000000000..442615cbbc --- /dev/null +++ b/discovery/linode/testdata/ca-central/v4/networking/ipv6/ranges.json @@ -0,0 +1,13 @@ +{ + "data": [ + { + "range": "2600:3c04:e001:456::", + "prefix": 64, + "region": "ca-central", + "route_target": "2600:3c04::f03c:92ff:fe1a:fb68" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/discovery/linode/testdata/no_region_filter/v4/account/events.json b/discovery/linode/testdata/no_region_filter/v4/account/events.json new file mode 100644 index 0000000000..ca302e4fd0 --- /dev/null +++ b/discovery/linode/testdata/no_region_filter/v4/account/events.json @@ -0,0 +1,6 @@ +{ + "data": [], + "results": 0, + "pages": 1, + "page": 1 +} diff --git 
a/discovery/linode/testdata/no_region_filter/v4/linode/instances.json b/discovery/linode/testdata/no_region_filter/v4/linode/instances.json new file mode 100644 index 0000000000..25d5271d9c --- /dev/null +++ b/discovery/linode/testdata/no_region_filter/v4/linode/instances.json @@ -0,0 +1,180 @@ +{ + "data": [ + { + "id": 26838044, + "label": "prometheus-linode-sd-exporter-1", + "group": "", + "status": "running", + "created": "2021-05-12T04:23:44", + "updated": "2021-05-12T04:23:44", + "type": "g6-standard-2", + "ipv4": [ + "45.33.82.151", + "96.126.108.16", + "192.168.170.51", + "192.168.201.25" + ], + "ipv6": "2600:3c03::f03c:92ff:fe1a:1382/128", + "image": "linode/arch", + "region": "us-east", + "specs": { + "disk": 81920, + "memory": 4096, + "vcpus": 2, + "gpus": 0, + "transfer": 4000 + }, + "alerts": { + "cpu": 180, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80, + "io": 10000 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + }, + "last_successful": null + }, + "hypervisor": "kvm", + "watchdog_enabled": true, + "tags": [ + "monitoring" + ] + }, + { + "id": 26848419, + "label": "prometheus-linode-sd-exporter-2", + "group": "", + "status": "running", + "created": "2021-05-12T12:41:49", + "updated": "2021-05-12T12:41:49", + "type": "g6-standard-2", + "ipv4": [ + "139.162.196.43" + ], + "ipv6": "2a01:7e00::f03c:92ff:fe1a:9976/128", + "image": "linode/debian10", + "region": "eu-west", + "specs": { + "disk": 81920, + "memory": 4096, + "vcpus": 2, + "gpus": 0, + "transfer": 4000 + }, + "alerts": { + "cpu": 180, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80, + "io": 10000 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + }, + "last_successful": null + }, + "hypervisor": "kvm", + "watchdog_enabled": true, + "tags": [ + "monitoring" + ] + }, + { + "id": 26837938, + "label": "prometheus-linode-sd-exporter-3", + "group": "", + "status": "running", + "created": "2021-05-12T04:20:11", + "updated": "2021-05-12T04:20:11", + "type": "g6-standard-1", + "ipv4": [ + "192.53.120.25" + ], + "ipv6": "2600:3c04::f03c:92ff:fe1a:fb68/128", + "image": "linode/ubuntu20.04", + "region": "ca-central", + "specs": { + "disk": 51200, + "memory": 2048, + "vcpus": 1, + "gpus": 0, + "transfer": 2000 + }, + "alerts": { + "cpu": 90, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80, + "io": 10000 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + }, + "last_successful": null + }, + "hypervisor": "kvm", + "watchdog_enabled": true, + "tags": [ + "monitoring" + ] + }, + { + "id": 26837992, + "label": "prometheus-linode-sd-exporter-4", + "group": "", + "status": "running", + "created": "2021-05-12T04:22:06", + "updated": "2021-05-12T04:22:06", + "type": "g6-nanode-1", + "ipv4": [ + "66.228.47.103", + "172.104.18.104", + "192.168.148.94" + ], + "ipv6": "2600:3c03::f03c:92ff:fe1a:fb4c/128", + "image": "linode/ubuntu20.04", + "region": "us-east", + "specs": { + "disk": 25600, + "memory": 1024, + "vcpus": 1, + "gpus": 0, + "transfer": 1000 + }, + "alerts": { + "cpu": 90, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80, + "io": 10000 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + }, + "last_successful": null + }, + "hypervisor": "kvm", + "watchdog_enabled": true, + "tags": [ + "monitoring" + ] + } + ], + "page": 1, + "pages": 1, + "results": 4 +} diff --git 
a/discovery/linode/testdata/no_region_filter/v4/networking/ips.json b/discovery/linode/testdata/no_region_filter/v4/networking/ips.json new file mode 100644 index 0000000000..5173036f1c --- /dev/null +++ b/discovery/linode/testdata/no_region_filter/v4/networking/ips.json @@ -0,0 +1,150 @@ +{ + "page": 1, + "pages": 1, + "results": 13, + "data": [ + { + "address": "192.53.120.25", + "gateway": "192.53.120.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li2216-25.members.linode.com", + "linode_id": 26837938, + "region": "ca-central" + }, + { + "address": "66.228.47.103", + "gateway": "66.228.47.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li328-103.members.linode.com", + "linode_id": 26837992, + "region": "us-east" + }, + { + "address": "172.104.18.104", + "gateway": "172.104.18.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li1832-104.members.linode.com", + "linode_id": 26837992, + "region": "us-east" + }, + { + "address": "192.168.148.94", + "gateway": null, + "subnet_mask": "255.255.128.0", + "prefix": 17, + "type": "ipv4", + "public": false, + "rdns": null, + "linode_id": 26837992, + "region": "us-east" + }, + { + "address": "192.168.170.51", + "gateway": null, + "subnet_mask": "255.255.128.0", + "prefix": 17, + "type": "ipv4", + "public": false, + "rdns": null, + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "96.126.108.16", + "gateway": "96.126.108.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li365-16.members.linode.com", + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "45.33.82.151", + "gateway": "45.33.82.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li1028-151.members.linode.com", + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "192.168.201.25", + "gateway": null, + "subnet_mask": "255.255.128.0", + "prefix": 17, + "type": "ipv4", + "public": false, + "rdns": null, + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "139.162.196.43", + "gateway": "139.162.196.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li1359-43.members.linode.com", + "linode_id": 26848419, + "region": "eu-west" + }, + { + "address": "2600:3c04::f03c:92ff:fe1a:fb68", + "gateway": "fe80::1", + "subnet_mask": "ffff:ffff:ffff:ffff::", + "prefix": 64, + "type": "ipv6", + "rdns": null, + "linode_id": 26837938, + "region": "ca-central", + "public": true + }, + { + "address": "2600:3c03::f03c:92ff:fe1a:fb4c", + "gateway": "fe80::1", + "subnet_mask": "ffff:ffff:ffff:ffff::", + "prefix": 64, + "type": "ipv6", + "rdns": null, + "linode_id": 26837992, + "region": "us-east", + "public": true + }, + { + "address": "2600:3c03::f03c:92ff:fe1a:1382", + "gateway": "fe80::1", + "subnet_mask": "ffff:ffff:ffff:ffff::", + "prefix": 64, + "type": "ipv6", + "rdns": null, + "linode_id": 26838044, + "region": "us-east", + "public": true + }, + { + "address": "2a01:7e00::f03c:92ff:fe1a:9976", + "gateway": "fe80::1", + "subnet_mask": "ffff:ffff:ffff:ffff::", + "prefix": 64, + "type": "ipv6", + "rdns": null, + "linode_id": 26848419, + "region": "eu-west", + "public": true + } + ] +} diff --git a/discovery/linode/testdata/no_region_filter/v4/networking/ipv6/ranges.json 
b/discovery/linode/testdata/no_region_filter/v4/networking/ipv6/ranges.json new file mode 100644 index 0000000000..511a4d9a8c --- /dev/null +++ b/discovery/linode/testdata/no_region_filter/v4/networking/ipv6/ranges.json @@ -0,0 +1,19 @@ +{ + "data": [ + { + "range": "2600:3c03:e000:123::", + "prefix": 64, + "region": "us-east", + "route_target": "2600:3c03::f03c:92ff:fe1a:fb4c" + }, + { + "range": "2600:3c04:e001:456::", + "prefix": 64, + "region": "ca-central", + "route_target": "2600:3c04::f03c:92ff:fe1a:fb68" + } + ], + "page": 1, + "pages": 1, + "results": 2 +} diff --git a/discovery/linode/testdata/us-east/v4/account/events.json b/discovery/linode/testdata/us-east/v4/account/events.json new file mode 100644 index 0000000000..ca302e4fd0 --- /dev/null +++ b/discovery/linode/testdata/us-east/v4/account/events.json @@ -0,0 +1,6 @@ +{ + "data": [], + "results": 0, + "pages": 1, + "page": 1 +} diff --git a/discovery/linode/testdata/us-east/v4/linode/instances.json b/discovery/linode/testdata/us-east/v4/linode/instances.json new file mode 100644 index 0000000000..5e9a8f5abe --- /dev/null +++ b/discovery/linode/testdata/us-east/v4/linode/instances.json @@ -0,0 +1,97 @@ +{ + "data": [ + { + "id": 26838044, + "label": "prometheus-linode-sd-exporter-1", + "group": "", + "status": "running", + "created": "2021-05-12T04:23:44", + "updated": "2021-05-12T04:23:44", + "type": "g6-standard-2", + "ipv4": [ + "45.33.82.151", + "96.126.108.16", + "192.168.170.51", + "192.168.201.25" + ], + "ipv6": "2600:3c03::f03c:92ff:fe1a:1382/128", + "image": "linode/arch", + "region": "us-east", + "specs": { + "disk": 81920, + "memory": 4096, + "vcpus": 2, + "gpus": 0, + "transfer": 4000 + }, + "alerts": { + "cpu": 180, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80, + "io": 10000 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + }, + "last_successful": null + }, + "hypervisor": "kvm", + "watchdog_enabled": true, + "tags": [ + "monitoring" + ] + }, + { + "id": 26837992, + "label": "prometheus-linode-sd-exporter-4", + "group": "", + "status": "running", + "created": "2021-05-12T04:22:06", + "updated": "2021-05-12T04:22:06", + "type": "g6-nanode-1", + "ipv4": [ + "66.228.47.103", + "172.104.18.104", + "192.168.148.94" + ], + "ipv6": "2600:3c03::f03c:92ff:fe1a:fb4c/128", + "image": "linode/ubuntu20.04", + "region": "us-east", + "specs": { + "disk": 25600, + "memory": 1024, + "vcpus": 1, + "gpus": 0, + "transfer": 1000 + }, + "alerts": { + "cpu": 90, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80, + "io": 10000 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + }, + "last_successful": null + }, + "hypervisor": "kvm", + "watchdog_enabled": true, + "tags": [ + "monitoring" + ] + } + ], + "page": 1, + "pages": 1, + "results": 2 +} + diff --git a/discovery/linode/testdata/us-east/v4/networking/ips.json b/discovery/linode/testdata/us-east/v4/networking/ips.json new file mode 100644 index 0000000000..388cf59659 --- /dev/null +++ b/discovery/linode/testdata/us-east/v4/networking/ips.json @@ -0,0 +1,106 @@ +{ + "page": 1, + "pages": 1, + "results": 9, + "data": [ + { + "address": "66.228.47.103", + "gateway": "66.228.47.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li328-103.members.linode.com", + "linode_id": 26837992, + "region": "us-east" + }, + { + "address": "172.104.18.104", + "gateway": "172.104.18.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + 
"type": "ipv4", + "public": true, + "rdns": "li1832-104.members.linode.com", + "linode_id": 26837992, + "region": "us-east" + }, + { + "address": "192.168.148.94", + "gateway": null, + "subnet_mask": "255.255.128.0", + "prefix": 17, + "type": "ipv4", + "public": false, + "rdns": null, + "linode_id": 26837992, + "region": "us-east" + }, + { + "address": "192.168.170.51", + "gateway": null, + "subnet_mask": "255.255.128.0", + "prefix": 17, + "type": "ipv4", + "public": false, + "rdns": null, + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "96.126.108.16", + "gateway": "96.126.108.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li365-16.members.linode.com", + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "45.33.82.151", + "gateway": "45.33.82.1", + "subnet_mask": "255.255.255.0", + "prefix": 24, + "type": "ipv4", + "public": true, + "rdns": "li1028-151.members.linode.com", + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "192.168.201.25", + "gateway": null, + "subnet_mask": "255.255.128.0", + "prefix": 17, + "type": "ipv4", + "public": false, + "rdns": null, + "linode_id": 26838044, + "region": "us-east" + }, + { + "address": "2600:3c03::f03c:92ff:fe1a:fb4c", + "gateway": "fe80::1", + "subnet_mask": "ffff:ffff:ffff:ffff::", + "prefix": 64, + "type": "ipv6", + "rdns": null, + "linode_id": 26837992, + "region": "us-east", + "public": true + }, + { + "address": "2600:3c03::f03c:92ff:fe1a:1382", + "gateway": "fe80::1", + "subnet_mask": "ffff:ffff:ffff:ffff::", + "prefix": 64, + "type": "ipv6", + "rdns": null, + "linode_id": 26838044, + "region": "us-east", + "public": true + } + ] +} diff --git a/discovery/linode/testdata/us-east/v4/networking/ipv6/ranges.json b/discovery/linode/testdata/us-east/v4/networking/ipv6/ranges.json new file mode 100644 index 0000000000..34b2ae1cdd --- /dev/null +++ b/discovery/linode/testdata/us-east/v4/networking/ipv6/ranges.json @@ -0,0 +1,13 @@ +{ + "data": [ + { + "range": "2600:3c03:e000:123::", + "prefix": 64, + "region": "us-east", + "route_target": "2600:3c03::f03c:92ff:fe1a:fb4c" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/discovery/manager.go b/discovery/manager.go index 8b304a0faf..542655d4cc 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -28,48 +28,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - failedConfigs = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_failed_configs", - Help: "Current number of service discovery configurations that failed to load.", - }, - []string{"name"}, - ) - discoveredTargets = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_discovered_targets", - Help: "Current number of discovered targets.", - }, - []string{"name", "config"}, - ) - receivedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_received_updates_total", - Help: "Total number of update events received from the SD providers.", - }, - []string{"name"}, - ) - delayedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_delayed_total", - Help: "Total number of update events that couldn't be sent immediately.", - }, - []string{"name"}, - ) - sentUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_total", - Help: "Total number of update events sent to the SD consumers.", - }, - []string{"name"}, - ) -) - -func RegisterMetrics() { 
- prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) -} - type poolKey struct { setName string provider string @@ -92,7 +50,7 @@ type Provider struct { newSubs map[string]struct{} } -// Discoverer return the Discoverer of the provider +// Discoverer returns the Discoverer of the provider. func (p *Provider) Discoverer() Discoverer { return p.d } @@ -106,8 +64,24 @@ func (p *Provider) Config() interface{} { return p.config } +// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms. +// Does not register the metrics for the Discovery Manager. +// TODO(ptodev): Add ability to unregister the metrics? +func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) { + // Some SD mechanisms use the "refresh" package, which has its own metrics. + refreshSdMetrics := NewRefreshMetrics(reg) + + // Register the metrics specific for each SD mechanism, and the ones for the refresh package. + sdMetrics, err := RegisterSDMetrics(reg, refreshSdMetrics) + if err != nil { + return nil, fmt.Errorf("failed to register service discovery metrics: %w", err) + } + + return sdMetrics, nil +} + // NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { if logger == nil { logger = log.NewNopLogger() } @@ -118,10 +92,22 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager ctx: ctx, updatert: 5 * time.Second, triggerSend: make(chan struct{}, 1), + registerer: registerer, + sdMetrics: sdMetrics, } for _, option := range options { option(mgr) } + + // Register the metrics. + // We have to do this after setting all options, so that the name of the Manager is set. + if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { + mgr.metrics = metrics + } else { + level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + return nil + } + return mgr } @@ -134,6 +120,26 @@ func Name(n string) func(*Manager) { } } +// Updatert sets the update interval (the updatert field) of the manager. +// Used to speed up tests. +func Updatert(u time.Duration) func(*Manager) { + return func(m *Manager) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.updatert = u + } +} + +// SkipInitialWait makes the manager skip the initial wait before sending updates. This is used in serverless flavours of OTel's prometheusreceiver +// which is sensitive to startup latencies. +func SkipInitialWait() func(*Manager) { + return func(m *Manager) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.skipStartupWait = true + } +} + // HTTPClientOptions sets the list of HTTP client options to expose to // Discoverers. It is up to Discoverers to choose to use the options provided. func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { @@ -165,11 +171,22 @@ type Manager struct { // should only be modified in unit tests. updatert time.Duration + // skipStartupWait allows the discovery manager to skip the initial wait before sending updates + // to the channel. This is used in serverless flavours of OTel's prometheusreceiver + // which is sensitive to startup latencies. + skipStartupWait bool + // The triggerSend channel signals to the Manager that new updates have been received from providers.
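
Downstream of this signature change, callers wire things up roughly as follows; a sketch (not part of the diff), with the registry and manager name chosen for illustration:

package main

import (
	"context"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

func main() {
	reg := prometheus.NewRegistry()

	// One call registers the refresh metrics plus every SD mechanism's metrics.
	sdMetrics, err := discovery.CreateAndRegisterSDMetrics(reg)
	if err != nil {
		panic(err)
	}

	// NewManager now takes the registerer and SD metrics explicitly, and returns
	// nil if its own metrics cannot be created. Latency-sensitive embedders can
	// additionally pass discovery.SkipInitialWait().
	mgr := discovery.NewManager(context.Background(), log.NewNopLogger(), reg, sdMetrics, discovery.Name("scrape"))
	if mgr == nil {
		panic("failed to create discovery manager")
	}
	defer mgr.UnregisterMetrics()

	go mgr.Run()
}
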
triggerSend chan struct{} // lastProvider counts providers registered during Manager's lifetime. lastProvider uint + + // A registerer for all service discovery metrics. + registerer prometheus.Registerer + + metrics *Metrics + sdMetrics map[string]DiscovererMetrics } // Providers returns the currently configured SD providers. @@ -177,14 +194,19 @@ func (m *Manager) Providers() []*Provider { return m.providers } +// UnregisterMetrics unregisters manager metrics. It does not unregister +// service discovery or refresh metrics, whose lifecycle is managed independent +// of the discovery Manager. +func (m *Manager) UnregisterMetrics() { + m.metrics.Unregister(m.registerer) +} + // Run starts the background processing. func (m *Manager) Run() error { go m.sender() - for range m.ctx.Done() { - m.cancelDiscoverers() - return m.ctx.Err() - } - return nil + <-m.ctx.Done() + m.cancelDiscoverers() + return m.ctx.Err() } // SyncCh returns a read only channel used by all the clients to receive target updates. @@ -202,12 +224,18 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { for name, scfg := range cfg { failedCount += m.registerProviders(scfg, name) } - failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) + m.metrics.FailedConfigs.Set(float64(failedCount)) var ( - wg sync.WaitGroup - // keep shows if we keep any providers after reload. - keep bool + // The updater goroutine is responsible for gathering updates to the + // tsets and notifying the manager but in some cases, the reload will + // update the tsets as well (for example: providers change during a + // reload and old targets need to be cleared). + // shouldNotify tracks whether this call to ApplyConfig updates the + // targets and is used to determine whether a notification to the + // manager needs to be triggered. + shouldNotify bool + wg sync.WaitGroup newProviders []*Provider ) for _, prov := range m.providers { @@ -221,24 +249,29 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { continue } newProviders = append(newProviders, prov) - // refTargets keeps reference targets used to populate new subs' targets + // refTargets keeps reference targets used to populate new subs' targets as they should be the same. var refTargets map[string]*targetgroup.Group prov.mu.Lock() m.targetsMtx.Lock() for s := range prov.subs { - keep = true + // Since we have existing subs, and have new subs as well - we are + // going to be making changes to the targets map (deleting obsolete + // subs' targets and setting new subs' targets). Therefore, + // regardless of the discoverer we should notify the downstream + // managers so they can update the full target state. + shouldNotify = true refTargets = m.targets[poolKey{s, prov.name}] // Remove obsolete subs' targets. if _, ok := prov.newSubs[s]; !ok { delete(m.targets, poolKey{s, prov.name}) - discoveredTargets.DeleteLabelValues(m.name, s) + m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, s) } } // Set metrics and targets for new subs. for s := range prov.newSubs { if _, ok := prov.subs[s]; !ok { - discoveredTargets.WithLabelValues(m.name, s).Set(0) + m.metrics.DiscoveredTargets.WithLabelValues(s).Set(0) } if l := len(refTargets); l > 0 { m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l) @@ -256,11 +289,18 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { m.startProvider(m.ctx, prov) } } + + // This also helps making the downstream managers drop stale targets as soon as possible. 
+ // See https://github.com/prometheus/prometheus/pull/13147 for details. + if !reflect.DeepEqual(m.providers, newProviders) { + shouldNotify = true + } + // Currently downstream managers expect full target state upon config reload, so we must oblige. // While startProvider does pull the trigger, it may take some time to do so, therefore // we pull the trigger as soon as possible so that downstream managers can populate their state. // See https://github.com/prometheus/prometheus/pull/8639 for details. - if keep { + if shouldNotify { select { case m.triggerSend <- struct{}{}: default: @@ -281,7 +321,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D name: {}, }, } + m.mtx.Lock() m.providers = append(m.providers, p) + m.mtx.Unlock() m.startProvider(ctx, p) } @@ -318,7 +360,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case <-ctx.Done(): return case tgs, ok := <-updates: - receivedUpdates.WithLabelValues(m.name).Inc() + m.metrics.ReceivedUpdates.Inc() if !ok { level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. @@ -344,6 +386,33 @@ func (m *Manager) sender() { ticker := time.NewTicker(m.updatert) defer ticker.Stop() + // Send the targets downstream as soon as you see them if skipStartupWait is + // set. If discovery receiver's channel is too busy, fall back to the + // regular loop. + if m.skipStartupWait { + select { + case <-m.triggerSend: + m.metrics.SentUpdates.Inc() + select { + case m.syncCh <- m.allGroups(): + case <-ticker.C: + m.metrics.DelayedUpdates.Inc() + level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") + select { + case m.triggerSend <- struct{}{}: + default: + } + case <-m.ctx.Done(): + return + } + case <-m.ctx.Done(): + return + } + + // We restart the ticker to ensure that no two updates are less than updatert apart. + ticker.Reset(m.updatert) + } + for { select { case <-m.ctx.Done(): @@ -351,11 +420,11 @@ func (m *Manager) sender() { case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker. select { case <-m.triggerSend: - sentUpdates.WithLabelValues(m.name).Inc() + m.metrics.SentUpdates.Inc() select { case m.syncCh <- m.allGroups(): default: - delayedUpdates.WithLabelValues(m.name).Inc() + m.metrics.DelayedUpdates.Inc() level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: @@ -386,8 +455,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { m.targets[poolKey] = make(map[string]*targetgroup.Group) } for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. + // Some Discoverers send nil target group so need to check for it to avoid panics. + if tg == nil { + continue + } + if len(tg.Targets) > 0 { m.targets[poolKey][tg.Source] = tg + } else { + // The target group is empty, drop the corresponding entry to avoid leaks. + // In case the group yielded targets before, allGroups() will take care of making consumers drop them. 
+ delete(m.targets[poolKey], tg.Source) } } } @@ -396,19 +473,33 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { tSets := map[string][]*targetgroup.Group{} n := map[string]int{} + m.mtx.RLock() m.targetsMtx.Lock() - defer m.targetsMtx.Unlock() - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) + for _, p := range m.providers { + p.mu.RLock() + for s := range p.subs { + // Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers. + // See: https://github.com/prometheus/prometheus/issues/12858 for details. + if _, ok := tSets[s]; !ok { + tSets[s] = []*targetgroup.Group{} + n[s] = 0 + } + if tsets, ok := m.targets[poolKey{s, p.name}]; ok { + for _, tg := range tsets { + tSets[s] = append(tSets[s], tg) + n[s] += len(tg.Targets) + } + } } + p.mu.RUnlock() } + m.targetsMtx.Unlock() + m.mtx.RUnlock() + for setName, v := range n { - discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v)) + m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) } + return tSets } @@ -430,6 +521,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { d, err := cfg.NewDiscoverer(DiscovererOptions{ Logger: log.With(m.logger, "discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, + Metrics: m.sdMetrics[typ], }) if err != nil { level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 5371608112..58349bb635 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -35,6 +36,13 @@ func TestMain(m *testing.M) { testutil.TolerantVerifyLeak(m) } +func NewTestMetrics(t *testing.T, reg prometheus.Registerer) (RefreshMetricsManager, map[string]DiscovererMetrics) { + refreshMetrics := NewRefreshMetrics(reg) + sdMetrics, err := RegisterSDMetrics(reg, refreshMetrics) + require.NoError(t, err) + return refreshMetrics, sdMetrics +} + // TestTargetUpdatesOrder checks that the target updates are received in the expected order. 
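
To make the allGroups() guarantees above concrete, a hypothetical consumer loop (not part of the diff; the real consumers are the scrape and notify managers) can rely on every configured set name always being present in the synced map, with an empty group list meaning all of its targets are gone:

package example

import (
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// consume sketches a downstream reader of the discovery manager's sync channel.
func consume(mgr *discovery.Manager) {
	// set name -> source -> group
	current := map[string]map[string]*targetgroup.Group{}

	for tsets := range mgr.SyncCh() {
		for setName, groups := range tsets {
			next := make(map[string]*targetgroup.Group, len(groups))
			for _, tg := range groups {
				// updateGroup() no longer stores groups with zero targets,
				// so everything received here is live.
				next[tg.Source] = tg
			}
			// Replacing the whole entry drops sources that disappeared,
			// including the "empty list for a still-configured set" case.
			current[setName] = next
		}
	}
}
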
func TestTargetUpdatesOrder(t *testing.T) { // The order by which the updates are send is determined by the interval passed to the mock discovery adapter @@ -664,7 +672,11 @@ func TestTargetUpdatesOrder(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond var totalUpdatesCount int @@ -682,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) { for x := 0; x < totalUpdatesCount; x++ { select { case <-ctx.Done(): - t.Fatalf("%d: no update arrived within the timeout limit", x) + require.FailNow(t, "%d: no update arrived within the timeout limit", x) case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { @@ -708,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig { var cfg StaticConfig for i, addr := range addrs { cfg = append(cfg, &targetgroup.Group{ - Source: fmt.Sprint(i), + Source: strconv.Itoa(i), Targets: []model.LabelSet{ {model.AddressLabel: model.LabelValue(addr)}, }, @@ -721,7 +733,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, t.Helper() if _, ok := tGroups[key]; !ok { t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups) - return } match := false var mergedTargets string @@ -744,10 +755,8 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { t.Helper() - if _, ok := tSets[poolKey]; !ok { - t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets) - return - } + _, ok := tSets[poolKey] + require.True(t, ok, "'%s' should be present in Pool keys: %v", poolKey, tSets) match := false var mergedTargets string @@ -764,7 +773,7 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou if !present { msg = "not" } - t.Fatalf("%q should %s be present in Targets labels: %q", label, msg, mergedTargets) + require.FailNow(t, "%q should %s be present in Targets labels: %q", label, msg, mergedTargets) } } @@ -775,10 +784,45 @@ func pk(provider, setName string, n int) poolKey { } } +func TestTargetSetTargetGroupsPresentOnStartup(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics, SkipInitialWait()) + require.NotNil(t, discoveryManager) + + // Set the updatert to a super long time so we can verify that the skip worked correctly. 
+ discoveryManager.updatert = 100 * time.Hour + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + syncedTargets := <-discoveryManager.SyncCh() + require.Len(t, syncedTargets, 1) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Len(t, syncedTargets["prometheus"], 1) + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Len(t, discoveryManager.targets, 1) +} + func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -790,27 +834,32 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -822,12 +871,12 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) c["prometheus2"] = c["prometheus"] delete(c, "prometheus") @@ -836,16 +885,21 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { syncedTargets = <-discoveryManager.SyncCh() p 
= pk("static", "prometheus2", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -860,30 +914,35 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi c["prometheus2"] = c["prometheus"] discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 2) delete(c, "prometheus") discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() p = pk("static", "prometheus2", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -895,9 +954,9 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) var mu sync.Mutex c["prometheus2"] = Configs{ @@ -910,41 +969,48 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { discoveryManager.ApplyConfig(c) // Original 
targets should be present as soon as possible. + // An empty list should be sent for prometheus2 to drop any stale targets syncedTargets = <-discoveryManager.SyncCh() mu.Unlock() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) + require.Empty(t, syncedTargets["prometheus2"]) // prometheus2 configs should be ready on second sync. syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) p = pk("lockstatic", "prometheus2", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 2) // Delete part of config and ensure only original targets exist. delete(c, "prometheus2") discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -959,31 +1025,36 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) - require.Equal(t, 2, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 2) c["prometheus"] = Configs{ staticConfig("foo:9090"), } discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) p = pk("static", "prometheus", 1) 
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestDiscovererConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1001,21 +1072,26 @@ func TestDiscovererConfigs(t *testing.T) { verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) p = pk("static", "prometheus", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 2) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true) - require.Equal(t, 3, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 3) } // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after -// removing all targets from the static_configs sends an update with empty targetGroups. -// This is required to signal the receiver that this target set has no current targets. +// removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update. +// The update is required to signal the consumers that the previous targets should be dropped. 
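Each of the tests in this file now repeats the same setup dance: build a fresh prometheus.Registry, derive sdMetrics via NewTestMetrics, pass both to NewManager, assert non-nil, shorten updatert, and start Run. If the file keeps growing, that boilerplate could be hoisted into a helper. A minimal sketch against only the signatures visible in this diff; the helper name newTestManager is illustrative, not part of the patch:

    func newTestManager(t *testing.T) *Manager {
        t.Helper()
        ctx, cancel := context.WithCancel(context.Background())
        t.Cleanup(cancel)
        reg := prometheus.NewRegistry()
        _, sdMetrics := NewTestMetrics(t, reg)
        discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
        require.NotNil(t, discoveryManager)
        discoveryManager.updatert = 100 * time.Millisecond
        go discoveryManager.Run()
        return discoveryManager
    }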
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1029,9 +1105,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { syncedTargets := <-discoveryManager.SyncCh() p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) c["prometheus"] = Configs{ StaticConfig{{}}, @@ -1039,30 +1115,25 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() + require.Len(t, discoveryManager.targets, 1) p = pk("static", "prometheus", 1) targetGroups, ok := discoveryManager.targets[p] - if !ok { - t.Fatalf("'%v' should be present in target groups", p) - } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) - } - require.Equal(t, 1, len(syncedTargets)) - require.Equal(t, 1, len(syncedTargets["prometheus"])) - if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { - t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) - } + require.True(t, ok, "'%v' should be present in targets", p) + // Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436. 
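The issue-12436 comment referenced below captures the consumer-side contract being tested: when a config stops producing targets, the manager must still include that config in the synced update, with an empty group list, so downstream consumers drop the stale targets instead of holding them forever. A rough sketch of that contract from the consumer's perspective; this is illustrative, not code from this patch:

    // applySync mirrors what a receiver of SyncCh updates is expected to do:
    // replace per-config state wholesale, so an explicitly empty update
    // clears whatever targets were recorded before.
    func applySync(state, update map[string][]*targetgroup.Group) {
        for name, groups := range update {
            if len(groups) == 0 {
                delete(state, name) // empty update: drop the stale targets
                continue
            }
            state[name] = groups
        }
    }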
+	require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p)
+	require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
+	require.Empty(t, syncedTargets["prometheus"])
 }
 
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, nil)
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, nil, reg, sdMetrics)
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1079,14 +1150,12 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
 	syncedTargets := <-discoveryManager.SyncCh()
 	verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true)
 	verifyPresence(t, discoveryManager.targets, pk("static", "prometheus2", 0), "{__address__=\"foo:9090\"}", true)
-	if len(discoveryManager.providers) != 1 {
-		t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
-	}
-	require.Equal(t, 2, len(syncedTargets))
+	require.Len(t, discoveryManager.providers, 1, "Invalid number of providers.")
+	require.Len(t, syncedTargets, 2)
 	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+	require.Len(t, syncedTargets["prometheus"], 1)
 	verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
-	require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+	require.Len(t, syncedTargets["prometheus2"], 1)
 }
 
 func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
@@ -1098,7 +1167,12 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1118,11 +1192,21 @@ type errorConfig struct{ err error }
 
 func (e errorConfig) Name() string                                        { return "error" }
 func (e errorConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return nil, e.err }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (errorConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+	return &NoopDiscovererMetrics{}
+}
+
 type lockStaticConfig struct {
 	mu     *sync.Mutex
 	config StaticConfig
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (lockStaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+	return &NoopDiscovererMetrics{}
+}
+
 func (s lockStaticConfig) Name() string { return "lockstatic" }
 func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
 	return (lockStaticDiscoverer)(s), nil
@@ -1144,7 +1228,12 @@ func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.
 func TestGaugeFailedConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1158,10 +1247,8 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 	<-discoveryManager.SyncCh()
 
-	failedCount := client_testutil.ToFloat64(failedConfigs)
-	if failedCount != 3 {
-		t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
-	}
+	failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
+	require.Equal(t, 3.0, failedCount, "Expected to have 3 failed configs.")
 
 	c["prometheus"] = Configs{
 		staticConfig("foo:9090"),
@@ -1169,10 +1256,8 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 	<-discoveryManager.SyncCh()
 
-	failedCount = client_testutil.ToFloat64(failedConfigs)
-	if failedCount != 0 {
-		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
-	}
+	failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
+	require.Equal(t, 0.0, failedCount, "Expected to get no failed config.")
 }
 
 func TestCoordinationWithReceiver(t *testing.T) {
@@ -1220,6 +1305,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
 						Targets: []model.LabelSet{{"__instance__": "1"}},
 					},
 				},
+				"mock1": {},
 			},
 		},
 		{
@@ -1300,7 +1386,11 @@ func TestCoordinationWithReceiver(t *testing.T) {
 			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 			defer cancel()
 
-			mgr := NewManager(ctx, nil)
+			reg := prometheus.NewRegistry()
+			_, sdMetrics := NewTestMetrics(t, reg)
+
+			mgr := NewManager(ctx, nil, reg, sdMetrics)
+			require.NotNil(t, mgr)
 			mgr.updatert = updateDelay
 			go mgr.Run()
@@ -1312,19 +1402,14 @@ func TestCoordinationWithReceiver(t *testing.T) {
 				time.Sleep(expected.delay)
 				select {
 				case <-ctx.Done():
-					t.Fatalf("step %d: no update received in the expected timeframe", i)
+					require.FailNow(t, "no update received in the expected timeframe", "step %d", i)
 				case tgs, ok := <-mgr.SyncCh():
-					if !ok {
-						t.Fatalf("step %d: discovery manager channel is closed", i)
-					}
-					if len(tgs) != len(expected.tgs) {
-						t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v",
-							i, len(tgs), len(expected.tgs), tgs, expected.tgs)
-					}
+					require.True(t, ok, "step %d: discovery manager channel is closed", i)
+					require.Equal(t, len(expected.tgs), len(tgs), "step %d: targets mismatch", i)
+
 					for k := range expected.tgs {
-						if _, ok := tgs[k]; !ok {
-							t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
-						}
+						_, ok := tgs[k]
+						require.True(t, ok, "step %d: target group not found: %s", i, k)
 						assertEqualGroups(t, tgs[k], expected.tgs[k])
 					}
 				}
@@ -1392,10 +1477,15 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
 
 // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
 // ApplyConfig happens at the same time as targets update.
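For reference, client_testutil.ToFloat64 (github.com/prometheus/client_golang/prometheus/testutil) as used in TestGaugeFailedConfigs works on any collector that exposes exactly one metric, which is why the gauge can now be read straight off discoveryManager.metrics. A self-contained example of the same pattern:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/testutil"
    )

    func main() {
        // ToFloat64 gathers the collector and returns the value of its single
        // metric; it panics if the collector exposes zero or several metrics.
        g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "prometheus_sd_failed_configs"})
        g.Set(3)
        fmt.Println(testutil.ToFloat64(g)) // 3
    }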
-func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) { +func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + _, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1456,6 +1546,11 @@ func newTestDiscoverer() *testDiscoverer { } } +// NewDiscovererMetrics implements discovery.Config. +func (*testDiscoverer) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { + return &NoopDiscovererMetrics{} +} + // Name implements Config. func (t *testDiscoverer) Name() string { return "test" @@ -1477,3 +1572,24 @@ func (t *testDiscoverer) update(tgs []*targetgroup.Group) { <-t.ready t.up <- tgs } + +func TestUnregisterMetrics(t *testing.T) { + reg := prometheus.NewRegistry() + // Check that all metrics can be unregistered, allowing a second manager to be created. + for i := 0; i < 2; i++ { + ctx, cancel := context.WithCancel(context.Background()) + + refreshMetrics, sdMetrics := NewTestMetrics(t, reg) + + discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) + // discoveryManager will be nil if there was an error configuring metrics. + require.NotNil(t, discoveryManager) + // Unregister all metrics. + discoveryManager.UnregisterMetrics() + for _, sdMetric := range sdMetrics { + sdMetric.Unregister() + } + refreshMetrics.Unregister() + cancel() + } +} diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index cfd3e2c083..38b47accff 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -28,6 +28,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -48,7 +49,7 @@ const ( // imageLabel is the label that is used for the docker image running the service. imageLabel model.LabelName = metaLabelPrefix + "image" // portIndexLabel is the integer port index when multiple ports are defined; - // e.g. PORT1 would have a value of '1' + // e.g. PORT1 would have a value of '1'. portIndexLabel model.LabelName = metaLabelPrefix + "port_index" // taskLabel contains the mesos task name of the app instance. taskLabel model.LabelName = metaLabelPrefix + "task" @@ -78,12 +79,19 @@ type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &marathonMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "marathon" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(*c, opts.Logger) + return NewDiscovery(*c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. 
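These marathon changes follow the pattern applied across this series: the SDConfig gains a NewDiscovererMetrics constructor, and NewDiscoverer receives the already-registered metrics through DiscovererOptions. The expected call sequence, sketched from the signatures in this diff and mirroring testUpdateServices below (cfg, reg, and logger are assumed to be in scope, and error handling is trimmed):

    refreshMetrics := discovery.NewRefreshMetrics(reg)
    metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
    if err := metrics.Register(); err != nil {
        return nil, err
    }
    defer metrics.Unregister()
    defer refreshMetrics.Unregister()

    d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
        Logger:  logger,
        Metrics: metrics,
    })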
@@ -106,14 +114,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 { return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured") } - if c.HTTPClientConfig.BasicAuth != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") - } - if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") - } - if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + + if len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 { + switch { + case c.HTTPClientConfig.BasicAuth != nil: + return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") + case len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0: + return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") + case c.HTTPClientConfig.Authorization != nil: + return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + } } return c.HTTPClientConfig.Validate() } @@ -130,7 +140,12 @@ type Discovery struct { } // NewDiscovery returns a new Marathon Discovery. -func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*marathonMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd") if err != nil { return nil, err @@ -152,10 +167,13 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { appsClient: fetchApps, } d.Discovery = refresh.NewDiscovery( - logger, - "marathon", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "marathon", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -321,7 +339,7 @@ type appListClient func(ctx context.Context, client *http.Client, url string) (* // fetchApps requests a list of applications from a marathon server. func fetchApps(ctx context.Context, client *http.Client, url string) (*appList, error) { - request, err := http.NewRequest("GET", url, nil) + request, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } @@ -435,7 +453,6 @@ func targetsForApp(app *app) []model.LabelSet { // Gather info about the app's 'tasks'. Each instance (container) is considered a task // and can be reachable at one or more host:port endpoints. for _, t := range app.Tasks { - // There are no labels to gather if only Ports is defined. (eg. with host networking) // Ports can only be gathered from the Task (not from the app) and are guaranteed // to be the same across all tasks. 
If we haven't gathered any ports by now, @@ -446,7 +463,6 @@ func targetsForApp(app *app) []model.LabelSet { // Iterate over the ports we gathered using one of the methods above. for i, port := range ports { - // A zero port here means that either the portMapping has a zero port defined, // or there is a portDefinition with requirePorts set to false. This means the port // is auto-generated by Mesos and needs to be looked up in the task. @@ -489,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string { host = task.Host } - return net.JoinHostPort(host, fmt.Sprintf("%d", port)) + return net.JoinHostPort(host, strconv.Itoa(int(port))) } // Get a list of ports and a list of labels from a PortMapping. @@ -498,7 +514,6 @@ func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32 labels := make([]map[string]string, len(portMappings)) for i := 0; i < len(portMappings); i++ { - labels[i] = portMappings[i].Labels if containerNet { diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go index 258e3c8ddf..659899f163 100644 --- a/discovery/marathon/marathon_test.go +++ b/discovery/marathon/marathon_test.go @@ -21,8 +21,11 @@ import ( "net/http/httptest" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -36,7 +39,19 @@ func testConfig() SDConfig { } func testUpdateServices(client appListClient) ([]*targetgroup.Group, error) { - md, err := NewDiscovery(testConfig(), nil) + cfg := testConfig() + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + err := metrics.Register() + if err != nil { + return nil, err + } + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + md, err := NewDiscovery(cfg, nil, metrics) if err != nil { return nil, err } @@ -54,23 +69,15 @@ func TestMarathonSDHandleError(t *testing.T) { } ) tgs, err := testUpdateServices(client) - if !errors.Is(err, errTesting) { - t.Fatalf("Expected error: %s", err) - } - if len(tgs) != 0 { - t.Fatalf("Got group: %s", tgs) - } + require.ErrorIs(t, err, errTesting) + require.Empty(t, tgs, "Expected no target groups.") } func TestMarathonSDEmptyList(t *testing.T) { client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) > 0 { - t.Fatalf("Got group: %v", tgs) - } + require.NoError(t, err) + require.Empty(t, tgs, "Expected no target groups.") } func marathonTestAppList(labels map[string]string, runningTasks int) *appList { @@ -104,66 +111,49 @@ func TestMarathonSDSendGroup(t *testing.T) { return marathonTestAppList(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 1, "Expected 1 target.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 1 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := 
tg.Targets[0] - if tgt[model.AddressLabel] != "mesos-slave1:31000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") } func TestMarathonSDRemoveApp(t *testing.T) { - md, err := NewDiscovery(testConfig(), nil) - if err != nil { - t.Fatalf("%s", err) - } + cfg := testConfig() + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + md, err := NewDiscovery(cfg, nil, metrics) + require.NoError(t, err) md.appsClient = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppList(marathonValidLabel, 1), nil } tgs, err := md.refresh(context.Background()) - if err != nil { - t.Fatalf("Got error on first update: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 targetgroup, got", len(tgs)) - } + require.NoError(t, err, "Got error on first update.") + require.Len(t, tgs, 1, "Expected 1 targetgroup.") tg1 := tgs[0] md.appsClient = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppList(marathonValidLabel, 0), nil } tgs, err = md.refresh(context.Background()) - if err != nil { - t.Fatalf("Got error on second update: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 targetgroup, got", len(tgs)) - } + require.NoError(t, err, "Got error on second update.") + require.Len(t, tgs, 1, "Expected 1 targetgroup.") + tg2 := tgs[0] - if tg2.Source != tg1.Source { - if len(tg2.Targets) > 0 { - t.Errorf("Got a non-empty target set: %s", tg2.Targets) - } - t.Fatalf("Source is different: %s != %s", tg1.Source, tg2.Source) - } + require.NotEmpty(t, tg2.Targets, "Got a non-empty target set.") + require.Equal(t, tg1.Source, tg2.Source, "Source is different.") } func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks int) *appList { @@ -198,34 +188,22 @@ func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) { return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") + tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 2, "Wrong number of targets.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 2 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := tg.Targets[0] - if tgt[model.AddressLabel] != "mesos-slave1:31000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "yes", 
string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), + "Wrong portMappings label from the first port: %s", tgt[model.AddressLabel]) + tgt = tg.Targets[1] - if tgt[model.AddressLabel] != "mesos-slave1:32000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), + "Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) } func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int) *appList { @@ -255,20 +233,12 @@ func TestMarathonZeroTaskPorts(t *testing.T) { return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } - tg := tgs[0] + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") - if tg.Source != "test-service-zero-ports" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 0 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } + tg := tgs[0] + require.Equal(t, "test-service-zero-ports", tg.Source, "Wrong target group name.") + require.Empty(t, tg.Targets, "Wrong number of targets.") } func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) { @@ -283,9 +253,7 @@ func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) { defer ts.Close() // Execute test case and validate behavior. _, err := testUpdateServices(nil) - if err == nil { - t.Fatalf("Expected error for 5xx HTTP response from marathon server, got nil") - } + require.Error(t, err, "Expected error for 5xx HTTP response from marathon server.") } func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTasks int) *appList { @@ -323,40 +291,24 @@ func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) { return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") + tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 2, "Wrong number of targets.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 2 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := tg.Targets[0] - if tgt[model.AddressLabel] != "mesos-slave1:1234" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), + "Wrong portMappings label from the 
first port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), + "Wrong portDefinitions label from the first port.") + tgt = tg.Targets[1] - if tgt[model.AddressLabel] != "mesos-slave1:5678" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "yes" { - t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:5678", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Empty(t, tgt[model.LabelName(portMappingLabelPrefix+"prometheus")], "Wrong portMappings label from the second port.") + require.Equal(t, "yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string, runningTasks int) *appList { @@ -393,40 +345,22 @@ func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) { return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") + tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 2, "Wrong number of targets.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 2 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := tg.Targets[0] - if tgt[model.AddressLabel] != "mesos-slave1:31000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + tgt = tg.Targets[1] - if tgt[model.AddressLabel] != "mesos-slave1:32000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "yes" { - t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Equal(t, "yes", 
string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *appList { @@ -458,40 +392,22 @@ func TestMarathonSDSendGroupWithPorts(t *testing.T) { return marathonTestAppListWithPorts(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") + tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 2, "Wrong number of targets.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 2 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := tg.Targets[0] - if tgt[model.AddressLabel] != "mesos-slave1:31000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + tgt = tg.Targets[1] - if tgt[model.AddressLabel] != "mesos-slave1:32000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList { @@ -532,40 +448,22 @@ func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) { return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") + tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 2, "Wrong number of targets.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 2 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := tg.Targets[0] - if 
tgt[model.AddressLabel] != "mesos-slave1:12345" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + tgt = tg.Targets[1] - if tgt[model.AddressLabel] != "mesos-slave1:32000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList { @@ -606,40 +504,22 @@ func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) { return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") + tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 2, "Wrong number of targets.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 2 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := tg.Targets[0] - if tgt[model.AddressLabel] != "mesos-slave1:31000" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + tgt = tg.Targets[1] - if tgt[model.AddressLabel] != "mesos-slave1:12345" { - 
t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList { @@ -684,38 +564,20 @@ func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) { return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil } tgs, err := testUpdateServices(client) - if err != nil { - t.Fatalf("Got error: %s", err) - } - if len(tgs) != 1 { - t.Fatal("Expected 1 target group, got", len(tgs)) - } + require.NoError(t, err) + require.Len(t, tgs, 1, "Expected 1 target group.") + tg := tgs[0] + require.Equal(t, "test-service", tg.Source, "Wrong target group name.") + require.Len(t, tg.Targets, 2, "Wrong number of targets.") - if tg.Source != "test-service" { - t.Fatalf("Wrong target group name: %s", tg.Source) - } - if len(tg.Targets) != 2 { - t.Fatalf("Wrong number of targets: %v", tg.Targets) - } tgt := tg.Targets[0] - if tgt[model.AddressLabel] != "1.2.3.4:8080" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" { - t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + tgt = tg.Targets[1] - if tgt[model.AddressLabel] != "1.2.3.4:1234" { - t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) - } - if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { - t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) - } + require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.") + require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } diff --git a/discovery/marathon/metrics.go b/discovery/marathon/metrics.go new file mode 100644 index 0000000000..790c167471 --- /dev/null +++ 
b/discovery/marathon/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package marathon + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*marathonMetrics)(nil) + +type marathonMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *marathonMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *marathonMetrics) Unregister() {} diff --git a/discovery/metrics.go b/discovery/metrics.go new file mode 100644 index 0000000000..356be1ddcb --- /dev/null +++ b/discovery/metrics.go @@ -0,0 +1,100 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +// Metrics to be used with a discovery manager. 
+type Metrics struct { + FailedConfigs prometheus.Gauge + DiscoveredTargets *prometheus.GaugeVec + ReceivedUpdates prometheus.Counter + DelayedUpdates prometheus.Counter + SentUpdates prometheus.Counter +} + +func NewManagerMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) { + m := &Metrics{} + + m.FailedConfigs = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "prometheus_sd_failed_configs", + Help: "Current number of service discovery configurations that failed to load.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.DiscoveredTargets = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_sd_discovered_targets", + Help: "Current number of discovered targets.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + []string{"config"}, + ) + + m.ReceivedUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_received_updates_total", + Help: "Total number of update events received from the SD providers.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.DelayedUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_delayed_total", + Help: "Total number of update events that couldn't be sent immediately.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.SentUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_total", + Help: "Total number of update events sent to the SD consumers.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + metrics := []prometheus.Collector{ + m.FailedConfigs, + m.DiscoveredTargets, + m.ReceivedUpdates, + m.DelayedUpdates, + m.SentUpdates, + } + + for _, collector := range metrics { + err := registerer.Register(collector) + if err != nil { + return nil, fmt.Errorf("failed to register discovery manager metrics: %w", err) + } + } + + return m, nil +} + +// Unregister unregisters all metrics. +func (m *Metrics) Unregister(registerer prometheus.Registerer) { + registerer.Unregister(m.FailedConfigs) + registerer.Unregister(m.DiscoveredTargets) + registerer.Unregister(m.ReceivedUpdates) + registerer.Unregister(m.DelayedUpdates) + registerer.Unregister(m.SentUpdates) +} diff --git a/discovery/kubernetes/client_metrics.go b/discovery/metrics_k8s_client.go similarity index 79% rename from discovery/kubernetes/client_metrics.go rename to discovery/metrics_k8s_client.go index b316f7d885..c13ce53317 100644 --- a/discovery/kubernetes/client_metrics.go +++ b/discovery/metrics_k8s_client.go @@ -11,10 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetes +package discovery import ( "context" + "fmt" "net/url" "time" @@ -23,13 +24,27 @@ import ( "k8s.io/client-go/util/workqueue" ) -const workqueueMetricsNamespace = metricsNamespace + "_workqueue" +// This file registers metrics used by the Kubernetes Go client (k8s.io/client-go). +// Unfortunately, k8s.io/client-go metrics are global. +// If we instantiate multiple k8s SD instances, their k8s/client-go metrics will overlap. +// To prevent us from displaying misleading metrics, we register k8s.io/client-go metrics +// outside of the Kubernetes SD. 
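Because k8s.io/client-go metrics are process-global, the RegisterK8sClientMetricsWithPrometheus helper introduced just below is meant to be called once at startup rather than once per Kubernetes SD instance. A usage sketch, assuming the caller owns the registerer and propagates the error:

    // Hook client-go's global metrics into our registry exactly once.
    if err := discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer); err != nil {
        return fmt.Errorf("registering Kubernetes client metrics: %w", err)
    }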
+
+const (
+	KubernetesMetricsNamespace = "prometheus_sd_kubernetes"
+	workqueueMetricsNamespace  = KubernetesMetricsNamespace + "_workqueue"
+)
+
+var (
+	clientGoRequestMetrics  = &clientGoRequestMetricAdapter{}
+	clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
+)
 
 var (
 	// Metrics for client-go's HTTP requests.
 	clientGoRequestResultMetricVec = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
-			Namespace: metricsNamespace,
+			Namespace: KubernetesMetricsNamespace,
 			Name:      "http_request_total",
 			Help:      "Total number of HTTP requests to the Kubernetes API by status code.",
 		},
@@ -37,7 +52,7 @@ var (
 	)
 	clientGoRequestLatencyMetricVec = prometheus.NewSummaryVec(
 		prometheus.SummaryOpts{
-			Namespace:  metricsNamespace,
+			Namespace:  KubernetesMetricsNamespace,
 			Name:       "http_request_duration_seconds",
 			Help:       "Summary of latencies for HTTP requests to the Kubernetes API by endpoint.",
 			Objectives: map[float64]float64{},
@@ -45,7 +60,7 @@ var (
 		[]string{"endpoint"},
 	)
 
-	// Definition of metrics for client-go workflow metrics provider
+	// Definition of metrics for the client-go workqueue metrics provider.
 	clientGoWorkqueueDepthMetricVec = prometheus.NewGaugeVec(
 		prometheus.GaugeOpts{
 			Namespace: workqueueMetricsNamespace,
@@ -106,20 +121,44 @@ func (noopMetric) Dec() {}
 func (noopMetric) Observe(float64) {}
 func (noopMetric) Set(float64)     {}
 
-// Definition of client-go metrics adapters for HTTP requests observation
+// Definition of client-go metrics adapters for HTTP requests observation.
 type clientGoRequestMetricAdapter struct{}
 
-func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) {
+// Returns all of the Prometheus metrics derived from k8s.io/client-go.
+// This may be used to register and unregister the metrics.
+func clientGoMetrics() []prometheus.Collector {
+	return []prometheus.Collector{
+		clientGoRequestResultMetricVec,
+		clientGoRequestLatencyMetricVec,
+		clientGoWorkqueueDepthMetricVec,
+		clientGoWorkqueueAddsMetricVec,
+		clientGoWorkqueueLatencyMetricVec,
+		clientGoWorkqueueUnfinishedWorkSecondsMetricVec,
+		clientGoWorkqueueLongestRunningProcessorMetricVec,
+		clientGoWorkqueueWorkDurationMetricVec,
+	}
+}
+
+func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
+	clientGoRequestMetrics.RegisterWithK8sGoClient()
+	clientGoWorkloadMetrics.RegisterWithK8sGoClient()
+
+	for _, collector := range clientGoMetrics() {
+		err := registerer.Register(collector)
+		if err != nil {
+			return fmt.Errorf("failed to register Kubernetes Go Client metrics: %w", err)
+		}
+	}
+	return nil
+}
+
+func (f *clientGoRequestMetricAdapter) RegisterWithK8sGoClient() {
 	metrics.Register(
 		metrics.RegisterOpts{
 			RequestLatency: f,
 			RequestResult:  f,
 		},
 	)
-	registerer.MustRegister(
-		clientGoRequestResultMetricVec,
-		clientGoRequestLatencyMetricVec,
-	)
 }
 
 func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
@@ -130,19 +169,11 @@ func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.U
 	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }
 
-// Definition of client-go workqueue metrics provider definition
+// Definition of the client-go workqueue metrics provider.
type clientGoWorkqueueMetricsProvider struct{} -func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Registerer) { +func (f *clientGoWorkqueueMetricsProvider) RegisterWithK8sGoClient() { workqueue.SetProvider(f) - registerer.MustRegister( - clientGoWorkqueueDepthMetricVec, - clientGoWorkqueueAddsMetricVec, - clientGoWorkqueueLatencyMetricVec, - clientGoWorkqueueWorkDurationMetricVec, - clientGoWorkqueueUnfinishedWorkSecondsMetricVec, - clientGoWorkqueueLongestRunningProcessorMetricVec, - ) } func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { diff --git a/discovery/metrics_refresh.go b/discovery/metrics_refresh.go new file mode 100644 index 0000000000..ef49e591a3 --- /dev/null +++ b/discovery/metrics_refresh.go @@ -0,0 +1,75 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// RefreshMetricsVecs are metric vectors for the "refresh" package. +// We define them here in the "discovery" package in order to avoid a cyclic dependency between +// "discovery" and "refresh". +type RefreshMetricsVecs struct { + failuresVec *prometheus.CounterVec + durationVec *prometheus.SummaryVec + + metricRegisterer MetricRegisterer +} + +var _ RefreshMetricsManager = (*RefreshMetricsVecs)(nil) + +func NewRefreshMetrics(reg prometheus.Registerer) RefreshMetricsManager { + m := &RefreshMetricsVecs{ + failuresVec: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_refresh_failures_total", + Help: "Number of refresh failures for the given SD mechanism.", + }, + []string{"mechanism"}), + durationVec: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_sd_refresh_duration_seconds", + Help: "The duration of a refresh in seconds for the given SD mechanism.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"mechanism"}), + } + + // The reason we register metric vectors instead of metrics is so that + // the metrics are not visible until they are recorded. + m.metricRegisterer = NewMetricRegisterer(reg, []prometheus.Collector{ + m.failuresVec, + m.durationVec, + }) + + return m +} + +// Instantiate returns metrics out of metric vectors. +func (m *RefreshMetricsVecs) Instantiate(mech string) *RefreshMetrics { + return &RefreshMetrics{ + Failures: m.failuresVec.WithLabelValues(mech), + Duration: m.durationVec.WithLabelValues(mech), + } +} + +// Register implements discovery.DiscovererMetrics. +func (m *RefreshMetricsVecs) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. 
+func (m *RefreshMetricsVecs) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 162833ece4..68f6fe3ccc 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -19,13 +19,17 @@ import ( "net" "net/http" "net/url" + "sort" "strconv" "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -57,6 +61,7 @@ var DefaultDockerSDConfig = DockerSDConfig{ Filters: []Filter{}, HostNetworkingHost: "localhost", HTTPClientConfig: config.DefaultHTTPClientConfig, + MatchFirstNetwork: true, } func init() { @@ -72,7 +77,15 @@ type DockerSDConfig struct { Filters []Filter `yaml:"filters"` HostNetworkingHost string `yaml:"host_networking_host"` - RefreshInterval model.Duration `yaml:"refresh_interval"` + RefreshInterval model.Duration `yaml:"refresh_interval"` + MatchFirstNetwork bool `yaml:"match_first_network"` +} + +// NewDiscovererMetrics implements discovery.Config. +func (*DockerSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &dockerMetrics{ + refreshMetrics: rmi, + } } // Name returns the name of the Config. @@ -80,7 +93,7 @@ func (*DockerSDConfig) Name() string { return "docker" } // NewDiscoverer returns a Discoverer for the Config. func (c *DockerSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDockerDiscovery(c, opts.Logger) + return NewDockerDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -111,15 +124,20 @@ type DockerDiscovery struct { port int hostNetworkingHost string filters filters.Args + matchFirstNetwork bool } // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets. 
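+// The metrics argument must be the value produced by DockerSDConfig's
+// NewDiscovererMetrics; any other DiscovererMetrics implementation is
+// rejected with an error.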
-func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscovery, error) {
-	var err error
+func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
+	m, ok := metrics.(*dockerMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}

 	d := &DockerDiscovery{
 		port:               conf.Port,
 		hostNetworkingHost: conf.HostNetworkingHost,
+		matchFirstNetwork:  conf.MatchFirstNetwork,
 	}

 	hostURL, err := url.Parse(conf.Host)
@@ -165,10 +183,13 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscove
 	}

 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"docker",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:              logger,
+			Mech:                "docker",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
+		},
 	)
 	return d, nil
 }
@@ -178,7 +199,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		Source: "Docker",
 	}

-	containers, err := d.client.ContainerList(ctx, types.ContainerListOptions{Filters: d.filters})
+	containers, err := d.client.ContainerList(ctx, container.ListOptions{Filters: d.filters})
 	if err != nil {
 		return nil, fmt.Errorf("error while listing containers: %w", err)
 	}
@@ -188,6 +209,11 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		return nil, fmt.Errorf("error while computing network labels: %w", err)
 	}

+	allContainers := make(map[string]types.Container)
+	for _, c := range containers {
+		allContainers[c.ID] = c
+	}
+
 	for _, c := range containers {
 		if len(c.Names) == 0 {
 			continue
@@ -204,7 +230,48 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 			commonLabels[dockerLabelContainerLabelPrefix+ln] = v
 		}

-		for _, n := range c.NetworkSettings.Networks {
+		networks := c.NetworkSettings.Networks
+		containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
+		if len(networks) == 0 {
+			// Try to look up shared networks.
+			for {
+				if containerNetworkMode.IsContainer() {
+					tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
+					if !exists {
+						break
+					}
+					networks = tmpContainer.NetworkSettings.Networks
+					containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
+					if len(networks) > 0 {
+						break
+					}
+				} else {
+					break
+				}
+			}
+		}
+
+		if d.matchFirstNetwork && len(networks) > 1 {
+			// Sort networks by name and take the first non-nil network.
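+			// Sorting the candidate names keeps the selection deterministic,
+			// so a container attached to several networks always yields the
+			// same target instead of flapping between refreshes.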
+ keys := make([]string, 0, len(networks)) + for k, n := range networks { + if n != nil { + keys = append(keys, k) + } + } + if len(keys) > 0 { + sort.Strings(keys) + firstNetworkMode := keys[0] + firstNetwork := networks[firstNetworkMode] + networks = map[string]*network.EndpointSettings{firstNetworkMode: firstNetwork} + } + } + + for _, n := range networks { + if n == nil { + continue + } + var added bool for _, p := range c.Ports { diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index bb84b1571a..398393a15a 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -16,12 +16,16 @@ package moby import ( "context" "fmt" + "sort" "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery" ) func TestDockerSDRefresh(t *testing.T) { @@ -37,21 +41,206 @@ host: %s var cfg DockerSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + require.NoError(t, err) + + ctx := context.Background() + tgs, err := d.refresh(ctx) + require.NoError(t, err) + + require.Len(t, tgs, 1) + + tg := tgs[0] + require.NotNil(t, tg) + require.NotNil(t, tg.Targets) + require.Len(t, tg.Targets, 8) + + expected := []model.LabelSet{ + { + "__address__": "172.19.0.2:9100", + "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "node", + "__meta_docker_container_label_com_docker_compose_version": "1.25.0", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_label_prometheus_job": "node", + "__meta_docker_container_name": "/dockersd_node_1", + "__meta_docker_container_network_mode": "dockersd_default", + "__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.19.0.2", + "__meta_docker_network_label_com_docker_compose_network": "default", + "__meta_docker_network_label_com_docker_compose_project": "dockersd", + "__meta_docker_network_label_com_docker_compose_version": "1.25.0", + "__meta_docker_network_name": "dockersd_default", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9100", + }, + { + "__address__": "172.19.0.3:80", + "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "noport", + "__meta_docker_container_label_com_docker_compose_version": "1.25.0", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_label_prometheus_job": "noport", + "__meta_docker_container_name": "/dockersd_noport_1", + "__meta_docker_container_network_mode": "dockersd_default", + "__meta_docker_network_id": 
"7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.19.0.3", + "__meta_docker_network_label_com_docker_compose_network": "default", + "__meta_docker_network_label_com_docker_compose_project": "dockersd", + "__meta_docker_network_label_com_docker_compose_version": "1.25.0", + "__meta_docker_network_name": "dockersd_default", + "__meta_docker_network_scope": "local", + }, + { + "__address__": "localhost", + "__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "host_networking", + "__meta_docker_container_label_com_docker_compose_version": "1.25.0", + "__meta_docker_container_name": "/dockersd_host_networking_1", + "__meta_docker_container_network_mode": "host", + "__meta_docker_network_ip": "", + }, + { + "__address__": "172.20.0.2:3306", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.2:33060", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.20.0.2:9104", + "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_name": "/dockersd_mysql_exporter", + "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": 
"172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9104", + }, + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + } + sortFunc(expected) + sortFunc(tg.Targets) + + for i, lbls := range expected { + t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { + require.Equal(t, lbls, tg.Targets[i]) + }) + } +} + +func TestDockerSDRefreshMatchAllNetworks(t *testing.T) { + sdmock := NewSDMock(t, "dockerprom") + sdmock.Setup() + + e := sdmock.Endpoint() + url := e[:len(e)-1] + cfgString := fmt.Sprintf(` +--- +host: %s +`, url) + var cfg DockerSDConfig + require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) + + cfg.MatchFirstNetwork = false + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 3, len(tg.Targets)) + require.Len(t, tg.Targets, 13) - for i, lbls := range []model.LabelSet{ + expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", @@ -103,9 +292,182 @@ host: %s "__meta_docker_container_network_mode": "host", "__meta_docker_network_ip": "", }, - } { + { + "__address__": "172.20.0.2:3306", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + 
"__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.2:33060", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.2:3306", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.2", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.21.0.2:33060", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.2", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.2:9104", + "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + 
"__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_name": "/dockersd_mysql_exporter", + "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.2", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9104", + }, + { + "__address__": "172.20.0.2:9104", + "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_name": "/dockersd_mysql_exporter", + "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9104", + }, + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + 
"__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.21.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + } + + sortFunc(expected) + sortFunc(tg.Targets) + + for i, lbls := range expected { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } + +func sortFunc(labelSets []model.LabelSet) { + sort.Slice(labelSets, func(i, j int) bool { + return labelSets[i]["__address__"] < labelSets[j]["__address__"] + }) +} diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index 371f9d5ed1..b0147467d2 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -23,6 +23,7 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -69,12 +70,19 @@ type Filter struct { Values []string `yaml:"values"` } +// NewDiscovererMetrics implements discovery.Config. +func (*DockerSwarmSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &dockerswarmMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*DockerSwarmSDConfig) Name() string { return "dockerswarm" } // NewDiscoverer returns a Discoverer for the Config. func (c *DockerSwarmSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -117,8 +125,11 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, error) { - var err error +func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*dockerswarmMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } d := &Discovery{ port: conf.Port, @@ -168,10 +179,13 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, err } d.Discovery = refresh.NewDiscovery( - logger, - "dockerswarm", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "dockerswarm", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } diff --git a/discovery/moby/metrics_docker.go b/discovery/moby/metrics_docker.go new file mode 100644 index 0000000000..457d520a0c --- /dev/null +++ b/discovery/moby/metrics_docker.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package moby + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*dockerMetrics)(nil) + +type dockerMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *dockerMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *dockerMetrics) Unregister() {} diff --git a/discovery/moby/metrics_dockerswarm.go b/discovery/moby/metrics_dockerswarm.go new file mode 100644 index 0000000000..227ba6c2b0 --- /dev/null +++ b/discovery/moby/metrics_dockerswarm.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package moby + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*dockerswarmMetrics)(nil) + +type dockerswarmMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *dockerswarmMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. 
+func (m *dockerswarmMetrics) Unregister() {}
diff --git a/discovery/moby/mock_test.go b/discovery/moby/mock_test.go
index f6511ed26f..7ef5cb07c3 100644
--- a/discovery/moby/mock_test.go
+++ b/discovery/moby/mock_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/prometheus/prometheus/util/strutil"
 )

-// SDMock is the interface for the DigitalOcean mock
+// SDMock is the interface for the Docker mock.
 type SDMock struct {
 	t      *testing.T
 	Server *httptest.Server
@@ -47,12 +47,12 @@ func NewSDMock(t *testing.T, directory string) *SDMock {
 	}
 }

-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
 func (m *SDMock) Endpoint() string {
 	return m.Server.URL + "/"
 }

-// Setup creates the mock server
+// Setup creates the mock server.
 func (m *SDMock) Setup() {
 	m.Mux = http.NewServeMux()
 	m.Server = httptest.NewServer(m.Mux)
@@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() {
 			if len(query) == 2 {
 				h := sha1.New()
 				h.Write([]byte(query[1]))
-				// Avoing long filenames for Windows.
+				// Avoiding long filenames for Windows.
 				f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10]
 			}
 		}
diff --git a/discovery/moby/network.go b/discovery/moby/network.go
index 0e0d0041de..ea1ca66bc7 100644
--- a/discovery/moby/network.go
+++ b/discovery/moby/network.go
@@ -15,9 +15,9 @@ package moby

 import (
 	"context"
-	"fmt"
+	"strconv"

-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"

 	"github.com/prometheus/prometheus/util/strutil"
@@ -34,7 +34,7 @@ const (
 )

 func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) {
-	networks, err := client.NetworkList(ctx, types.NetworkListOptions{})
+	networks, err := client.NetworkList(ctx, network.ListOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s
 			labelPrefix + labelNetworkID:       network.ID,
 			labelPrefix + labelNetworkName:     network.Name,
 			labelPrefix + labelNetworkScope:    network.Scope,
-			labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
-			labelPrefix + labelNetworkIngress:  fmt.Sprintf("%t", network.Ingress),
+			labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal),
+			labelPrefix + labelNetworkIngress:  strconv.FormatBool(network.Ingress),
 		}
 		for k, v := range network.Labels {
 			ln := strutil.SanitizeLabelName(k)
diff --git a/discovery/moby/nodes.go b/discovery/moby/nodes.go
index 85092f9071..b5be844eda 100644
--- a/discovery/moby/nodes.go
+++ b/discovery/moby/nodes.go
@@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
 			swarmLabelNodeAddress: model.LabelValue(n.Status.Addr),
 		}
 		if n.ManagerStatus != nil {
-			labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader))
+			labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader))
 			labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability)
 			labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr)
 		}
@@ -80,7 +80,6 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
 		labels[model.AddressLabel] = model.LabelValue(addr)

 		tg.Targets = append(tg.Targets, labels)
-
 	}
 	return []*targetgroup.Group{tg}, nil
 }
diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go
index 1a53321378..4ad1088d1a 100644
---
a/discovery/moby/nodes_test.go +++ b/discovery/moby/nodes_test.go @@ -19,9 +19,12 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery" ) func TestDockerSwarmNodesSDRefresh(t *testing.T) { @@ -38,19 +41,26 @@ host: %s var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 5, len(tg.Targets)) + require.Len(t, tg.Targets, 5) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/services.go b/discovery/moby/services.go index 1d472b5c00..c61b499259 100644 --- a/discovery/moby/services.go +++ b/discovery/moby/services.go @@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, labels[model.LabelName(k)] = model.LabelValue(v) } - addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port)) + addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go index 1bc9832c7a..47ca69e33a 100644 --- a/discovery/moby/services_test.go +++ b/discovery/moby/services_test.go @@ -19,9 +19,12 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery" ) func TestDockerSwarmSDServicesRefresh(t *testing.T) { @@ -38,19 +41,26 @@ host: %s var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 15, len(tg.Targets)) + require.Len(t, tg.Targets, 15) for i, lbls := range []model.LabelSet{ { @@ -332,19 +342,26 @@ filters: var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) 
require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg, "tg should not be nil") require.NotNil(t, tg.Targets, "tg.targets should not be nil") - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/tasks.go b/discovery/moby/tasks.go index 2505a7b07a..38b9d33de2 100644 --- a/discovery/moby/tasks.go +++ b/discovery/moby/tasks.go @@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err labels[model.LabelName(k)] = model.LabelValue(v) } - addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port)) + addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go index 2cc9322f61..ef71bc02f5 100644 --- a/discovery/moby/tasks_test.go +++ b/discovery/moby/tasks_test.go @@ -19,9 +19,12 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery" ) func TestDockerSwarmTasksSDRefresh(t *testing.T) { @@ -38,19 +41,26 @@ host: %s var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 27, len(tg.Targets)) + require.Len(t, tg.Targets, 27) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/testdata/dockerprom/containers/json.json b/discovery/moby/testdata/dockerprom/containers/json.json index 37f575d22c..33406bf9a4 100644 --- a/discovery/moby/testdata/dockerprom/containers/json.json +++ b/discovery/moby/testdata/dockerprom/containers/json.json @@ -128,5 +128,174 @@ } }, "Mounts": [] + }, + { + "Id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "Names": [ + "/dockersd_mysql" + ], + "Image": "mysql:5.7.29", + "ImageID": "sha256:5d9483f9a7b21c87e0f5b9776c3e06567603c28c0062013eda127c968175f5e8", + "Command": "mysqld", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 3306, + "Type": "tcp" + }, + { + "PrivatePort": 33060, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysql", + "com.docker.compose.version": "2.2.2" + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "dockersd_private" + }, + "NetworkSettings": { + "Networks": { + "dockersd_private": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "EndpointID": "80f8a61b37701a9991bb98c75ddd23fd9b7c16b5575ca81343f6b44ff4a2a9d9", + "Gateway": "172.20.0.1", + "IPAddress": 
"172.20.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:14:00:0a", + "DriverOpts": null + }, + "dockersd_private1": { + "IPAMConfig": {}, + "Links": null, + "Aliases": [ + "mysql", + "mysql", + "f9ade4b83199" + ], + "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "EndpointID": "f80921d10e78c99a5907705aae75befea40c3d3e9f820e66ab392f7274be16b8", + "Gateway": "172.21.0.1", + "IPAddress": "172.21.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:15:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] + }, + { + "Id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "Names": [ + "/dockersd_mysql_exporter" + ], + "Image": "prom/mysqld-exporter:latest", + "ImageID": "sha256:121b8a7cd0525dd89aaec58ad7d34c3bb3714740e5a67daf6510ccf71ab219a9", + "Command": "/bin/mysqld_exporter", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 9104, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysqlexporter", + "com.docker.compose.version": "2.2.2", + "maintainer": "The Prometheus Authors " + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8" + }, + "NetworkSettings": { + "Networks": {} + }, + "Mounts": [] + }, + { + "Id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "Names": [ + "/dockersd_multi_networks" + ], + "Image": "mysql:5.7.29", + "ImageID": "sha256:16ae2f4625ba63a250462bedeece422e741de9f0caf3b1d89fd5b257aca80cd1", + "Command": "mysqld", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 3306, + "Type": "tcp" + }, + { + "PrivatePort": 33060, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysql", + "com.docker.compose.version": "2.2.2" + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "dockersd_private_none" + }, + "NetworkSettings": { + "Networks": { + "dockersd_private": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "EndpointID": "972d6807997369605ace863af58de6cb90c787a5bf2ffc4105662d393ae539b7", + "Gateway": "172.20.0.1", + "IPAddress": "172.20.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:14:00:02", + "DriverOpts": null + }, + "dockersd_private1": { + "IPAMConfig": {}, + "Links": null, + "Aliases": [ + "mysql", + "mysql", + "f9ade4b83199" + ], + "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "EndpointID": "91a98405344ee1cb7d977cafabe634837876651544b32da20a5e0155868e6f5f", + "Gateway": "172.21.0.1", + "IPAddress": "172.21.0.3", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:15:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] } ] diff --git a/discovery/moby/testdata/dockerprom/networks.json b/discovery/moby/testdata/dockerprom/networks.json index 35facd3bb9..75d4442df8 100644 --- a/discovery/moby/testdata/dockerprom/networks.json +++ b/discovery/moby/testdata/dockerprom/networks.json @@ -111,5 +111,59 @@ "Containers": {}, "Options": {}, "Labels": {} + }, + { + 
"Name": "dockersd_private", + "Id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "Created": "2022-03-25T09:21:17.718370976+08:00", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "172.20.0.1/16" + } + ] + }, + "Internal": false, + "Attachable": false, + "Ingress": false, + "ConfigFrom": { + "Network": "" + }, + "ConfigOnly": false, + "Containers": {}, + "Options": {}, + "Labels": {} + }, + { + "Name": "dockersd_private1", + "Id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "Created": "2022-03-25T09:21:17.718370976+08:00", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "172.21.0.1/16" + } + ] + }, + "Internal": false, + "Attachable": false, + "Ingress": false, + "ConfigFrom": { + "Network": "" + }, + "ConfigOnly": false, + "Containers": {}, + "Options": {}, + "Labels": {} } ] diff --git a/discovery/moby/testdata/swarmprom/services.json b/discovery/moby/testdata/swarmprom/services.json index 72caa7a7f8..8f6c0793dd 100644 --- a/discovery/moby/testdata/swarmprom/services.json +++ b/discovery/moby/testdata/swarmprom/services.json @@ -224,7 +224,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/moby/testdata/swarmprom/tasks.json b/discovery/moby/testdata/swarmprom/tasks.json index 33d81f25ce..af5ff9fe28 100644 --- a/discovery/moby/testdata/swarmprom/tasks.json +++ b/discovery/moby/testdata/swarmprom/tasks.json @@ -973,7 +973,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/nomad/metrics.go b/discovery/nomad/metrics.go new file mode 100644 index 0000000000..9707153d91 --- /dev/null +++ b/discovery/nomad/metrics.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nomad + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*nomadMetrics)(nil) + +type nomadMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator + + failuresCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &nomadMetrics{ + refreshMetrics: rmi, + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_nomad_failures_total", + Help: "Number of nomad service discovery refresh failures.", + }), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.failuresCount, + }) + + return m +} + +// Register implements discovery.DiscovererMetrics. +func (m *nomadMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *nomadMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index 7013f0737c..d9c48120ae 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -49,27 +49,18 @@ const ( ) // DefaultSDConfig is the default nomad SD configuration. -var ( - DefaultSDConfig = SDConfig{ - AllowStale: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - Namespace: "default", - RefreshInterval: model.Duration(60 * time.Second), - Region: "global", - Server: "http://localhost:4646", - TagSeparator: ",", - } - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_nomad_failures_total", - Help: "Number of nomad service discovery refresh failures.", - }) -) +var DefaultSDConfig = SDConfig{ + AllowStale: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + Namespace: "default", + RefreshInterval: model.Duration(60 * time.Second), + Region: "global", + Server: "http://localhost:4646", + TagSeparator: ",", +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) } // SDConfig is the configuration for nomad based service discovery. @@ -83,12 +74,17 @@ type SDConfig struct { TagSeparator string `yaml:"tag_separator,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "nomad" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -121,10 +117,16 @@ type Discovery struct { region string server string tagSeparator string + metrics *nomadMetrics } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*nomadMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + d := &Discovery{ allowStale: conf.AllowStale, namespace: conf.Namespace, @@ -132,6 +134,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { region: conf.Region, server: conf.Server, tagSeparator: conf.TagSeparator, + metrics: m, } HTTPClient, err := config.NewClientFromConfig(conf.HTTPClientConfig, "nomad_sd") @@ -153,10 +156,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { d.client = client d.Discovery = refresh.NewDiscovery( - logger, - "nomad", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "nomad", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -167,7 +173,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { } stubs, _, err := d.client.Services().List(opts) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, err } @@ -179,7 +185,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { for _, service := range stub.Services { instances, _, err := d.client.Services().Get(service.ServiceName, opts) if err != nil { - failuresCount.Inc() + d.metrics.failuresCount.Inc() return nil, fmt.Errorf("failed to fetch services: %w", err) } diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index 40d412d74f..357d4a8e9b 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -22,15 +22,18 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery" ) type NomadSDTestSuite struct { Mock *SDMock } -// SDMock is the interface for the nomad mock +// SDMock is the interface for the nomad mock. 
type SDMock struct { t *testing.T Server *httptest.Server @@ -127,8 +130,16 @@ func TestConfiguredService(t *testing.T) { conf := &SDConfig{ Server: "http://localhost:4646", } - _, err := NewDiscovery(conf, nil) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + _, err := NewDiscovery(conf, nil, metrics) require.NoError(t, err) + + metrics.Unregister() } func TestNomadSDRefresh(t *testing.T) { @@ -141,18 +152,26 @@ func TestNomadSDRefresh(t *testing.T) { cfg := DefaultSDConfig cfg.Server = endpoint.String() - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) tgs, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 1, len(tg.Targets)) + require.Len(t, tg.Targets, 1) lbls := model.LabelSet{ "__address__": model.LabelValue("127.0.0.1:30456"), diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 16964cfb62..8964da9294 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "net" + "strconv" "github.com/go-kit/log" "github.com/gophercloud/gophercloud" @@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group } tg := &targetgroup.Group{ - Source: fmt.Sprintf("OS_" + h.region), + Source: "OS_" + h.region, } // OpenStack API reference // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details @@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group } for _, hypervisor := range hypervisorList { labels := model.LabelSet{} - addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port)) + addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port)) labels[model.AddressLabel] = model.LabelValue(addr) labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID) labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname) diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 396d5283dc..45684b4a2e 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -53,12 +53,12 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) { hypervisor, _ := mock.openstackAuthSuccess() ctx := context.Background() tgs, err := hypervisor.refresh(ctx) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NoError(t, err) require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for l, v := range map[string]string{ "__address__": "172.16.70.14:0", diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index b2fe1e7870..78c669e6f7 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "net" + "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -120,7 +121,7 @@ func (i 
*InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, } pager := servers.List(client, opts) tg := &targetgroup.Group{ - Source: fmt.Sprintf("OS_" + i.region), + Source: "OS_" + i.region, } err = pager.EachPage(func(page pagination.Page) (bool, error) { if ctx.Err() != nil { @@ -145,16 +146,22 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - flavorId, ok := s.Flavor["id"].(string) - if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") - continue + flavorName, nameOk := s.Flavor["original_name"].(string) + // "original_name" is only available for microversion >= 2.47. It was added in favor of "id". + if !nameOk { + flavorID, idOk := s.Flavor["id"].(string) + if !idOk { + level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string") + continue + } + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) + } else { + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName) } - labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorId) - imageId, ok := s.Image["id"].(string) + imageID, ok := s.Image["id"].(string) if ok { - labels[openstackLabelInstanceImage] = model.LabelValue(imageId) + labels[openstackLabelInstanceImage] = model.LabelValue(imageID) } for k, v := range s.Metadata { @@ -194,7 +201,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok { lbls[openstackLabelPublicIP] = model.LabelValue(val) } - addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port)) + addr = net.JoinHostPort(addr, strconv.Itoa(i.port)) lbls[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, lbls) diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index d2da5d9681..2b5ac1b89e 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -61,12 +61,12 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { tgs, err := instance.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { @@ -84,7 +84,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.31:0"), - "__meta_openstack_instance_flavor": model.LabelValue("1"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.medium"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"), "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), @@ -96,7 +96,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.33:0"), - "__meta_openstack_instance_flavor": model.LabelValue("4"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp"), @@ -108,7 +108,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.34:0"), - "__meta_openstack_instance_flavor": 
model.LabelValue("4"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp"), diff --git a/discovery/openstack/metrics.go b/discovery/openstack/metrics.go new file mode 100644 index 0000000000..a64c1a6732 --- /dev/null +++ b/discovery/openstack/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openstack + +import ( + "github.com/prometheus/prometheus/discovery" +) + +type openstackMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +var _ discovery.DiscovererMetrics = (*openstackMetrics)(nil) + +// Register implements discovery.DiscovererMetrics. +func (m *openstackMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *openstackMetrics) Unregister() {} diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index e64c336482..4518f41166 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -18,9 +18,11 @@ import ( "net/http" "net/http/httptest" "testing" + + "github.com/stretchr/testify/require" ) -// SDMock is the interface for the OpenStack mock +// SDMock is the interface for the OpenStack mock. type SDMock struct { t *testing.T Server *httptest.Server @@ -34,12 +36,12 @@ func NewSDMock(t *testing.T) *SDMock { } } -// Endpoint returns the URI to the mock server +// Endpoint returns the URI to the mock server. func (m *SDMock) Endpoint() string { return m.Server.URL + "/" } -// Setup creates the mock server +// Setup creates the mock server. func (m *SDMock) Setup() { m.Mux = http.NewServeMux() m.Server = httptest.NewServer(m.Mux) @@ -49,18 +51,16 @@ func (m *SDMock) Setup() { const tokenID = "cbc36478b0bd8e67e89469c7749d4127" func testMethod(t *testing.T, r *http.Request, expected string) { - if expected != r.Method { - t.Errorf("Request method = %v, expected %v", r.Method, expected) - } + require.Equal(t, expected, r.Method, "Unexpected request method.") } func testHeader(t *testing.T, r *http.Request, header, expected string) { - if actual := r.Header.Get(header); expected != actual { - t.Errorf("Header %s = %s, expected %s", header, actual, expected) - } + t.Helper() + actual := r.Header.Get(header) + require.Equal(t, expected, actual, "Unexpected value for request header %s.", header) } -// HandleVersionsSuccessfully mocks version call +// HandleVersionsSuccessfully mocks version call. func (m *SDMock) HandleVersionsSuccessfully() { m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, ` @@ -88,7 +88,7 @@ func (m *SDMock) HandleVersionsSuccessfully() { }) } -// HandleAuthSuccessfully mocks auth call +// HandleAuthSuccessfully mocks auth call. 
func (m *SDMock) HandleAuthSuccessfully() { m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { w.Header().Add("X-Subject-Token", tokenID) @@ -236,10 +236,10 @@ const hypervisorListBody = ` ] }` -// HandleHypervisorListSuccessfully mocks os-hypervisors detail call +// HandleHypervisorListSuccessfully mocks os-hypervisors detail call. func (m *SDMock) HandleHypervisorListSuccessfully() { m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { - testMethod(m.t, r, "GET") + testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) w.Header().Add("Content-Type", "application/json") @@ -427,13 +427,17 @@ const serverListBody = ` "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { - "id": "1", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.medium", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", "security_groups": [ @@ -498,13 +502,17 @@ const serverListBody = ` "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { - "id": "4", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.small", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", "security_groups": [ @@ -533,10 +541,10 @@ const serverListBody = ` } ` -// HandleServerListSuccessfully mocks server detail call +// HandleServerListSuccessfully mocks server detail call. func (m *SDMock) HandleServerListSuccessfully() { m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { - testMethod(m.t, r, "GET") + testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) w.Header().Add("Content-Type", "application/json") @@ -572,10 +580,10 @@ const listOutput = ` } ` -// HandleFloatingIPListSuccessfully mocks floating ips call +// HandleFloatingIPListSuccessfully mocks floating ips call. func (m *SDMock) HandleFloatingIPListSuccessfully() { m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { - testMethod(m.t, r, "GET") + testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) w.Header().Add("Content-Type", "application/json") diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index 92c83a4cf4..c98f78788d 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -24,6 +24,7 @@ import ( "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/mwitkow/go-conntrack" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -65,12 +66,19 @@ type SDConfig struct { Availability string `yaml:"availability,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. 
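Every mechanism touched by this diff gains the same metrics plumbing: a small <name>Metrics type that carries the shared refresh-metrics instantiator and implements discovery.DiscovererMetrics with no-op Register/Unregister, plus a type-assertion guard at the top of NewDiscovery. A sketch for a hypothetical examplesd package, assuming only the interfaces shown in this diff:

package examplesd

import (
	"errors"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

var _ discovery.DiscovererMetrics = (*exampleMetrics)(nil)

// exampleMetrics only carries the shared refresh-metrics instantiator;
// mechanisms with no metrics of their own register nothing here.
type exampleMetrics struct {
	refreshMetrics discovery.RefreshMetricsInstantiator
}

func (m *exampleMetrics) Register() error { return nil }
func (m *exampleMetrics) Unregister()     {}

// NewDiscovererMetrics would hang off the mechanism's SDConfig, mirroring
// the implementations added across this diff.
func NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &exampleMetrics{refreshMetrics: rmi}
}

// newDiscovery sketches the defensive assertion each constructor now
// performs before wiring metrics into the refresher.
func newDiscovery(metrics discovery.DiscovererMetrics) (discovery.RefreshMetricsInstantiator, error) {
	m, ok := metrics.(*exampleMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}
	return m.refreshMetrics, nil
}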
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &openstackMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "openstack" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -134,16 +142,24 @@ type refresher interface { } // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { + m, ok := metrics.(*openstackMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + r, err := newRefresher(conf, l) if err != nil { return nil, err } return refresh.NewDiscovery( - l, - "openstack", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: l, + Mech: "openstack", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ), nil } diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index bb5dadcd7b..a70857a08b 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou model.InstanceLabel: model.LabelValue(server.Name), dedicatedServerLabelPrefix + "state": model.LabelValue(server.State), dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange), - dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)), + dedicatedServerLabelPrefix + "link_speed": model.LabelValue(strconv.Itoa(server.LinkSpeed)), dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack), dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)), dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os), dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel), - dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)), + dedicatedServerLabelPrefix + "server_id": model.LabelValue(strconv.FormatInt(server.ServerID, 10)), dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse), dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter), dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name), diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index e8ffa4a283..52311bcc87 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -47,11 +47,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr targetGroups, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup) require.NotNil(t, targetGroup.Targets) - require.Equal(t, 1, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 1) for i, lbls := 
range []model.LabelSet{ { diff --git a/discovery/ovhcloud/metrics.go b/discovery/ovhcloud/metrics.go new file mode 100644 index 0000000000..1c51709469 --- /dev/null +++ b/discovery/ovhcloud/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovhcloud + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*ovhcloudMetrics)(nil) + +type ovhcloudMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *ovhcloudMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *ovhcloudMetrics) Unregister() {} diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index 535ade4df5..988b4482f2 100644 --- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/ovh/go-ovh/ovh" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -52,6 +53,13 @@ type SDConfig struct { Service string `yaml:"service"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &ovhcloudMetrics{ + refreshMetrics: rmi, + } +} + // Name implements the Discoverer interface. func (c SDConfig) Name() string { return "ovhcloud" @@ -93,7 +101,7 @@ func createClient(config *SDConfig) (*ovh.Client, error) { // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, options.Logger) + return NewDiscovery(c, options.Logger, options.Metrics) } func init() { @@ -140,16 +148,24 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { } // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets. 
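The updated tests all repeat the same registration dance before calling NewDiscovery: build an isolated registry, instantiate the shared refresh metrics, derive the mechanism's metrics, register, and unregister on teardown. A sketch of that setup extracted into a hypothetical t.Cleanup-based helper (not part of this diff):

package examplesd_test

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/discovery"
)

// setupTestMetrics captures the boilerplate the updated tests repeat; the
// returned metrics value is what NewDiscovery now expects as its last
// argument. Cleanup mirrors the defer metrics.Unregister() calls above.
func setupTestMetrics(t *testing.T, conf discovery.Config) discovery.DiscovererMetrics {
	t.Helper()
	reg := prometheus.NewRegistry()
	refreshMetrics := discovery.NewRefreshMetrics(reg)
	metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
	require.NoError(t, metrics.Register())
	t.Cleanup(metrics.Unregister)
	t.Cleanup(refreshMetrics.Unregister)
	return metrics
}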
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { + m, ok := metrics.(*ovhcloudMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + r, err := newRefresher(conf, logger) if err != nil { return nil, err } return refresh.NewDiscovery( - logger, - "ovhcloud", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: logger, + Mech: "ovhcloud", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ), nil } diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index efcd95bb0d..9c95bf90e6 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -18,6 +18,7 @@ import ( "fmt" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -65,7 +66,7 @@ endpoint: %s _, err := createClient(&conf) - require.ErrorContains(t, err, "missing application key") + require.ErrorContains(t, err, "missing authentication information") } func TestParseIPs(t *testing.T) { @@ -121,8 +122,17 @@ func TestParseIPs(t *testing.T) { func TestDiscoverer(t *testing.T) { conf, _ := getMockConf("vps") logger := testutil.NewLogger(t) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + _, err := conf.NewDiscoverer(discovery.DiscovererOptions{ - Logger: logger, + Logger: logger, + Metrics: metrics, }) require.NoError(t, err) diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index e2d1dee364..58ceeabd87 100644 --- a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ -19,6 +19,7 @@ import ( "net/netip" "net/url" "path" + "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { model.InstanceLabel: model.LabelValue(server.Name), vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer), vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)), - vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)), - vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)), + vpsLabelPrefix + "model_vcore": model.LabelValue(strconv.Itoa(server.Model.Vcore)), + vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)), vpsLabelPrefix + "version": model.LabelValue(server.Model.Version), vpsLabelPrefix + "model_name": model.LabelValue(server.Model.Name), - vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)), - vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)), + vpsLabelPrefix + "disk": model.LabelValue(strconv.Itoa(server.Model.Disk)), + vpsLabelPrefix + "memory": model.LabelValue(strconv.Itoa(server.Model.Memory)), vpsLabelPrefix + "zone": model.LabelValue(server.Zone), vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName), vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster), vpsLabelPrefix + "state": 
model.LabelValue(server.State), vpsLabelPrefix + "name": model.LabelValue(server.Name), vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode), - vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)), + vpsLabelPrefix + "memory_limit": model.LabelValue(strconv.Itoa(server.MemoryLimit)), vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType), - vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)), + vpsLabelPrefix + "vcore": model.LabelValue(strconv.Itoa(server.Vcore)), vpsLabelPrefix + "ipv4": model.LabelValue(ipv4), vpsLabelPrefix + "ipv6": model.LabelValue(ipv6), } diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index b1177f215e..2d2d6dcd21 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -49,11 +49,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr targetGroups, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup) require.NotNil(t, targetGroup.Targets) - require.Equal(t, 1, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 1) for i, lbls := range []model.LabelSet{ { "__address__": "192.0.2.1", diff --git a/discovery/puppetdb/metrics.go b/discovery/puppetdb/metrics.go new file mode 100644 index 0000000000..2f5faa57ce --- /dev/null +++ b/discovery/puppetdb/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package puppetdb + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*puppetdbMetrics)(nil) + +type puppetdbMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *puppetdbMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *puppetdbMetrics) Unregister() {} diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index f22a2e22b5..8f89acbf93 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -29,6 +29,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/regexp" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -78,12 +79,19 @@ type SDConfig struct { Port int `yaml:"port"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &puppetdbMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "puppetdb" } // NewDiscoverer returns a Discoverer for the Config. 
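The fmt.Sprintf-to-strconv swaps in these label maps are behavior-preserving; strconv states the conversion directly and avoids fmt's reflection on a hot path. A quick equivalence check:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	n, id, b := 42, int64(1001), true
	fmt.Println(strconv.Itoa(n) == fmt.Sprintf("%d", n))            // true
	fmt.Println(strconv.FormatInt(id, 10) == fmt.Sprintf("%d", id)) // true
	fmt.Println(strconv.FormatBool(b) == fmt.Sprintf("%t", b))      // true
}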
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -115,7 +123,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Query == "" { return fmt.Errorf("query missing") } - return nil + return c.HTTPClientConfig.Validate() } // Discovery provides service discovery functionality based @@ -130,7 +138,12 @@ type Discovery struct { } // NewDiscovery returns a new PuppetDB discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*puppetdbMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + if logger == nil { logger = log.NewNopLogger() } @@ -156,10 +169,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "http", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "puppetdb", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -173,7 +189,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { return nil, err } - req, err := http.NewRequest("POST", d.url, bytes.NewBuffer(bodyBytes)) + req, err := http.NewRequest(http.MethodPost, d.url, bytes.NewBuffer(bodyBytes)) if err != nil { return nil, err } @@ -221,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { pdbLabelResource: model.LabelValue(resource.Resource), pdbLabelType: model.LabelValue(resource.Type), pdbLabelTitle: model.LabelValue(resource.Title), - pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", resource.Exported)), + pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)), pdbLabelFile: model.LabelValue(resource.File), pdbLabelEnvironment: model.LabelValue(resource.Environment), } diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index 5514787d46..bf9c7b215e 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -23,10 +23,12 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -62,9 +64,17 @@ func TestPuppetSlashInURL(t *testing.T) { Port: 80, RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) require.Equal(t, apiURL, d.url) + + metrics.Unregister() } } @@ -79,7 +89,12 @@ func TestPuppetDBRefresh(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, 
refreshMetrics) + require.NoError(t, metrics.Register()) + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -105,7 +120,9 @@ func TestPuppetDBRefresh(t *testing.T) { Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } - require.Equal(t, tgs, expectedTargets) + require.Equal(t, expectedTargets, tgs) + + metrics.Unregister() } func TestPuppetDBRefreshWithParameters(t *testing.T) { @@ -120,7 +137,12 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -156,7 +178,9 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } - require.Equal(t, tgs, expectedTargets) + require.Equal(t, expectedTargets, tgs) + + metrics.Unregister() } func TestPuppetDBInvalidCode(t *testing.T) { @@ -172,12 +196,19 @@ func TestPuppetDBInvalidCode(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() _, err = d.refresh(ctx) require.EqualError(t, err, "server returned HTTP status 400 Bad Request") + + metrics.Unregister() } func TestPuppetDBInvalidFormat(t *testing.T) { @@ -193,10 +224,17 @@ func TestPuppetDBInvalidFormat(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() _, err = d.refresh(ctx) require.EqualError(t, err, "unsupported content type text/plain; charset=utf-8") + + metrics.Unregister() } diff --git a/discovery/refresh/refresh.go b/discovery/refresh/refresh.go index 919567a53b..f037a90cff 100644 --- a/discovery/refresh/refresh.go +++ b/discovery/refresh/refresh.go @@ -20,31 +20,17 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - failuresCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_refresh_failures_total", - Help: "Number of refresh failures for the given SD mechanism.", - }, - []string{"mechanism"}, - ) - duration = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "prometheus_sd_refresh_duration_seconds", - Help: "The duration of a refresh in seconds for the given SD mechanism.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{"mechanism"}, - ) -) - -func init() { - prometheus.MustRegister(duration, failuresCount) +type Options struct { + Logger log.Logger + Mech string + Interval time.Duration + 
RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) + MetricsInstantiator discovery.RefreshMetricsInstantiator } // Discovery implements the Discoverer interface. @@ -52,23 +38,28 @@ type Discovery struct { logger log.Logger interval time.Duration refreshf func(ctx context.Context) ([]*targetgroup.Group, error) - - failures prometheus.Counter - duration prometheus.Observer + metrics *discovery.RefreshMetrics } // NewDiscovery returns a Discoverer function that calls a refresh() function at every interval. -func NewDiscovery(l log.Logger, mech string, interval time.Duration, refreshf func(ctx context.Context) ([]*targetgroup.Group, error)) *Discovery { - if l == nil { - l = log.NewNopLogger() +func NewDiscovery(opts Options) *Discovery { + m := opts.MetricsInstantiator.Instantiate(opts.Mech) + + var logger log.Logger + if opts.Logger == nil { + logger = log.NewNopLogger() + } else { + logger = opts.Logger } - return &Discovery{ - logger: l, - interval: interval, - refreshf: refreshf, - failures: failuresCount.WithLabelValues(mech), - duration: duration.WithLabelValues(mech), + + d := Discovery{ + logger: logger, + interval: opts.Interval, + refreshf: opts.RefreshF, + metrics: m, } + + return &d } // Run implements the Discoverer interface. @@ -115,12 +106,12 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { now := time.Now() defer func() { - d.duration.Observe(time.Since(now).Seconds()) + d.metrics.Duration.Observe(time.Since(now).Seconds()) }() tgs, err := d.refreshf(ctx) if err != nil { - d.failures.Inc() + d.metrics.Failures.Inc() } return tgs, err } diff --git a/discovery/refresh/refresh_test.go b/discovery/refresh/refresh_test.go index 6decef19fc..b70a326355 100644 --- a/discovery/refresh/refresh_test.go +++ b/discovery/refresh/refresh_test.go @@ -19,10 +19,12 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -65,7 +67,20 @@ func TestRefresh(t *testing.T) { return nil, fmt.Errorf("some error") } interval := time.Millisecond - d := NewDiscovery(nil, "test", interval, refresh) + + metrics := discovery.NewRefreshMetrics(prometheus.NewRegistry()) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + + d := NewDiscovery( + Options{ + Logger: nil, + Mech: "test", + Interval: interval, + RefreshF: refresh, + MetricsInstantiator: metrics, + }, + ) ch := make(chan []*targetgroup.Group) ctx, cancel := context.WithCancel(context.Background()) @@ -82,7 +97,7 @@ func TestRefresh(t *testing.T) { defer tick.Stop() select { case <-ch: - t.Fatal("Unexpected target group") + require.FailNow(t, "Unexpected target group") case <-tick.C: } } diff --git a/discovery/registry.go b/discovery/registry.go index 13168a07a7..1f491d4ca9 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -24,6 +24,8 @@ import ( "gopkg.in/yaml.v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -258,3 +260,24 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { } return err } + +// RegisterSDMetrics registers the metrics used by service discovery mechanisms. +// RegisterSDMetrics should be called only once during the lifetime of the Prometheus process. 
+// There is no need for the Prometheus process to unregister the metrics. +func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) { + err := rmm.Register() + if err != nil { + return nil, fmt.Errorf("failed to create service discovery refresh metrics") + } + + metrics := make(map[string]DiscovererMetrics) + for _, conf := range configNames { + currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm) + err = currentSdMetrics.Register() + if err != nil { + return nil, fmt.Errorf("failed to create service discovery metrics") + } + metrics[conf.Name()] = currentSdMetrics + } + return metrics, nil +} diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index 67311216d0..2542c63253 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -174,23 +174,27 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, labels[instanceTagsLabel] = model.LabelValue(tags) } - if server.IPv6 != nil { - labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) + addr := "" + if server.IPv6 != nil { //nolint:staticcheck + labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) //nolint:staticcheck + addr = server.IPv6.Address.String() //nolint:staticcheck } - if server.PublicIP != nil { - labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) + if server.PublicIP != nil { //nolint:staticcheck + labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck + addr = server.PublicIP.Address.String() //nolint:staticcheck } if server.PrivateIP != nil { labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP) + addr = *server.PrivateIP + } - addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10)) + if addr != "" { + addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) - targets = append(targets, labels) } - } return []*targetgroup.Group{{Source: "scaleway", Targets: targets}}, nil diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go index e7a32dd924..ae70a9ed25 100644 --- a/discovery/scaleway/instance_test.go +++ b/discovery/scaleway/instance_test.go @@ -55,12 +55,12 @@ api_url: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 3) for i, lbls := range []model.LabelSet{ { @@ -110,6 +110,28 @@ api_url: %s "__meta_scaleway_instance_type": "DEV1-S", "__meta_scaleway_instance_zone": "fr-par-1", }, + { + "__address__": "51.158.183.115:80", + "__meta_scaleway_instance_boot_type": "local", + "__meta_scaleway_instance_hostname": "routed-dualstack", + "__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a", + "__meta_scaleway_instance_image_arch": "x86_64", + "__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160", + "__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish", + "__meta_scaleway_instance_location_cluster_id": "19", + "__meta_scaleway_instance_location_hypervisor_id": "1201", + "__meta_scaleway_instance_location_node_id": "24", + "__meta_scaleway_instance_name": "routed-dualstack", + "__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f", 
+ "__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f", + "__meta_scaleway_instance_public_ipv4": "51.158.183.115", + "__meta_scaleway_instance_region": "nl-ams", + "__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092", + "__meta_scaleway_instance_security_group_name": "Default security group", + "__meta_scaleway_instance_status": "running", + "__meta_scaleway_instance_type": "DEV1-S", + "__meta_scaleway_instance_zone": "nl-ams-1", + }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) @@ -161,5 +183,5 @@ api_url: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) } diff --git a/discovery/scaleway/metrics.go b/discovery/scaleway/metrics.go new file mode 100644 index 0000000000..a8277d0fb3 --- /dev/null +++ b/discovery/scaleway/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scaleway + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*scalewayMetrics)(nil) + +type scalewayMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *scalewayMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *scalewayMetrics) Unregister() {} diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index 90091b3172..f8e1a83f5e 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/scaleway/scaleway-sdk-go/scw" @@ -103,6 +104,13 @@ type SDConfig struct { Role role `yaml:"role"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &scalewayMetrics{ + refreshMetrics: rmi, + } +} + func (c SDConfig) Name() string { return "scaleway" } @@ -160,7 +168,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(&c, options.Logger) + return NewDiscovery(&c, options.Logger, options.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -177,17 +185,25 @@ func init() { // the Discoverer interface. 
type Discovery struct{} -func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { + m, ok := metrics.(*scalewayMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + r, err := newRefresher(conf) if err != nil { return nil, err } return refresh.NewDiscovery( - logger, - "scaleway", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: logger, + Mech: "scaleway", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ), nil } diff --git a/discovery/scaleway/testdata/instance.json b/discovery/scaleway/testdata/instance.json index f8d35b215c..b433f7598e 100644 --- a/discovery/scaleway/testdata/instance.json +++ b/discovery/scaleway/testdata/instance.json @@ -216,6 +216,146 @@ "placement_group": null, "private_nics": [], "zone": "fr-par-1" + }, + { + "id": "4904366a-7e26-4b65-b97b-6392c761247a", + "name": "routed-dualstack", + "arch": "x86_64", + "commercial_type": "DEV1-S", + "boot_type": "local", + "organization": "20b3d507-96ac-454c-a795-bc731b46b12f", + "project": "20b3d507-96ac-454c-a795-bc731b46b12f", + "hostname": "routed-dualstack", + "image": { + "id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "project": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "root_volume": { + "id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "volume_type": "unified", + "size": 10000000000 + }, + "extra_volumes": {}, + "public": true, + "arch": "x86_64", + "creation_date": "2024-02-22T15:52:56.037007+00:00", + "modification_date": "2024-02-22T15:52:56.037007+00:00", + "default_bootscript": null, + "from_server": null, + "state": "available", + "tags": [], + "zone": "nl-ams-1" + }, + "volumes": { + "0": { + "boot": false, + "id": "fe85c817-e67e-4e24-8f13-bde3e9f42620", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "volume_type": "l_ssd", + "export_uri": null, + "organization": "20b3d507-96ac-454c-a795-bc731b46b12f", + "project": "20b3d507-96ac-454c-a795-bc731b46b12f", + "server": { + "id": "4904366a-7e26-4b65-b97b-6392c761247a", + "name": "routed-dualstack" + }, + "size": 20000000000, + "state": "available", + "creation_date": "2024-04-19T14:50:14.019739+00:00", + "modification_date": "2024-04-19T14:50:14.019739+00:00", + "tags": [], + "zone": "nl-ams-1" + } + }, + "tags": [], + "state": "running", + "protected": false, + "state_detail": "booted", + "public_ip": { + "id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0", + "address": "51.158.183.115", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [], + "state": "attached", + "ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e" + }, + "public_ips": [ + { + "id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0", + "address": "51.158.183.115", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [], + "state": "attached", + "ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e" + }, + { + "id": "f52a8c81-0875-4aee-b96e-eccfc6bec367", + "address": "2001:bc8:1640:1568:dc00:ff:fe21:91b", + "dynamic": false, + "gateway": "fe80::dc00:ff:fe21:91c", + "netmask": "64", + "family": "inet6", + "provisioning_mode": "slaac", + "tags": [], + "state": 
"attached", + "ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6" + } + ], + "mac_address": "de:00:00:21:09:1b", + "routed_ip_enabled": true, + "ipv6": null, + "extra_networks": [], + "dynamic_ip_required": false, + "enable_ipv6": false, + "private_ip": null, + "creation_date": "2024-04-19T14:50:14.019739+00:00", + "modification_date": "2024-04-19T14:52:21.181670+00:00", + "bootscript": { + "id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe", + "public": true, + "title": "x86_64 mainline 4.4.182 rev1", + "architecture": "x86_64", + "organization": "11111111-1111-4111-8111-111111111111", + "project": "11111111-1111-4111-8111-111111111111", + "kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182", + "dtb": "", + "initrd": "http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "zone": "nl-ams-1" + }, + "security_group": { + "id": "984414da-9fc2-49c0-a925-fed6266fe092", + "name": "Default security group" + }, + "location": { + "zone_id": "ams1", + "platform_id": "23", + "cluster_id": "19", + "hypervisor_id": "1201", + "node_id": "24" + }, + "maintenances": [], + "allowed_actions": [ + "poweroff", + "terminate", + "reboot", + "stop_in_place", + "backup" + ], + "placement_group": null, + "private_nics": [], + "zone": "nl-ams-1" } ] -} +} \ No newline at end of file diff --git a/discovery/triton/metrics.go b/discovery/triton/metrics.go new file mode 100644 index 0000000000..3e34a840af --- /dev/null +++ b/discovery/triton/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package triton + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*tritonMetrics)(nil) + +type tritonMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *tritonMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *tritonMetrics) Unregister() {} diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index c83f3b34ab..675149f2a3 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -26,6 +26,7 @@ import ( "github.com/go-kit/log" "github.com/mwitkow/go-conntrack" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -69,12 +70,19 @@ type SDConfig struct { Version int `yaml:"version"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &tritonMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "triton" } // NewDiscoverer returns a Discoverer for the Config. 
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return New(opts.Logger, c) + return New(opts.Logger, c, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -138,7 +146,12 @@ type Discovery struct { } // New returns a new Discovery which periodically refreshes its targets. -func New(logger log.Logger, conf *SDConfig) (*Discovery, error) { +func New(logger log.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*tritonMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + tls, err := config.NewTLSConfig(&conf.TLSConfig) if err != nil { return nil, err @@ -159,10 +172,13 @@ func New(logger log.Logger, conf *SDConfig) (*Discovery, error) { sdConfig: conf, } d.Discovery = refresh.NewDiscovery( - logger, - "triton", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "triton", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -195,7 +211,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups) } - req, err := http.NewRequest("GET", endpoint, nil) + req, err := http.NewRequest(http.MethodGet, endpoint, nil) if err != nil { return nil, err } diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index ca38965322..e37693e6bf 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -24,9 +24,12 @@ import ( "strings" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery" ) var ( @@ -78,12 +81,26 @@ var ( } ) -func newTritonDiscovery(c SDConfig) (*Discovery, error) { - return New(nil, &c) +func newTritonDiscovery(c SDConfig) (*Discovery, discovery.DiscovererMetrics, error) { + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + // TODO(ptodev): Add the ability to unregister refresh metrics. 
+ metrics := c.NewDiscovererMetrics(reg, refreshMetrics) + err := metrics.Register() + if err != nil { + return nil, nil, err + } + + d, err := New(nil, &c, metrics) + if err != nil { + return nil, nil, err + } + + return d, metrics, nil } func TestTritonSDNew(t *testing.T) { - td, err := newTritonDiscovery(conf) + td, m, err := newTritonDiscovery(conf) require.NoError(t, err) require.NotNil(t, td) require.NotNil(t, td.client) @@ -93,16 +110,17 @@ func TestTritonSDNew(t *testing.T) { require.Equal(t, conf.DNSSuffix, td.sdConfig.DNSSuffix) require.Equal(t, conf.Endpoint, td.sdConfig.Endpoint) require.Equal(t, conf.Port, td.sdConfig.Port) + m.Unregister() } func TestTritonSDNewBadConfig(t *testing.T) { - td, err := newTritonDiscovery(badconf) + td, _, err := newTritonDiscovery(badconf) require.Error(t, err) require.Nil(t, td) } func TestTritonSDNewGroupsConfig(t *testing.T) { - td, err := newTritonDiscovery(groupsconf) + td, m, err := newTritonDiscovery(groupsconf) require.NoError(t, err) require.NotNil(t, td) require.NotNil(t, td.client) @@ -113,10 +131,11 @@ func TestTritonSDNewGroupsConfig(t *testing.T) { require.Equal(t, groupsconf.Endpoint, td.sdConfig.Endpoint) require.Equal(t, groupsconf.Groups, td.sdConfig.Groups) require.Equal(t, groupsconf.Port, td.sdConfig.Port) + m.Unregister() } func TestTritonSDNewCNConfig(t *testing.T) { - td, err := newTritonDiscovery(cnconf) + td, m, err := newTritonDiscovery(cnconf) require.NoError(t, err) require.NotNil(t, td) require.NotNil(t, td.client) @@ -127,6 +146,7 @@ func TestTritonSDNewCNConfig(t *testing.T) { require.Equal(t, cnconf.DNSSuffix, td.sdConfig.DNSSuffix) require.Equal(t, cnconf.Endpoint, td.sdConfig.Endpoint) require.Equal(t, cnconf.Port, td.sdConfig.Port) + m.Unregister() } func TestTritonSDRefreshNoTargets(t *testing.T) { @@ -155,25 +175,27 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) { tgts := testTritonSDRefresh(t, conf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func TestTritonSDRefreshNoServer(t *testing.T) { - td, _ := newTritonDiscovery(conf) + td, m, _ := newTritonDiscovery(conf) _, err := td.refresh(context.Background()) require.Error(t, err) - require.Equal(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"), true) + require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint")) + m.Unregister() } func TestTritonSDRefreshCancelled(t *testing.T) { - td, _ := newTritonDiscovery(conf) + td, m, _ := newTritonDiscovery(conf) ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := td.refresh(ctx) require.Error(t, err) - require.Equal(t, strings.Contains(err.Error(), context.Canceled.Error()), true) + require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) + m.Unregister() } func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { @@ -188,7 +210,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func TestTritonSDRefreshCNsWithHostname(t *testing.T) { @@ -205,13 +227,13 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) { tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet { var ( - td, _ = newTritonDiscovery(c) - s = 
httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + td, m, _ = newTritonDiscovery(c) + s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, dstr) })) ) @@ -235,9 +257,11 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet tgs, err := td.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) + m.Unregister() + return tg.Targets } diff --git a/discovery/util.go b/discovery/util.go new file mode 100644 index 0000000000..4e2a088518 --- /dev/null +++ b/discovery/util.go @@ -0,0 +1,72 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +// MetricRegisterer is used by implementations of discovery.Discoverer that need +// to manage the lifetime of their metrics. +type MetricRegisterer interface { + RegisterMetrics() error + UnregisterMetrics() +} + +// metricRegistererImpl is an implementation of MetricRegisterer. +type metricRegistererImpl struct { + reg prometheus.Registerer + metrics []prometheus.Collector +} + +var _ MetricRegisterer = &metricRegistererImpl{} + +// NewMetricRegisterer creates an instance of a MetricRegisterer. +// Typically called inside the implementation of the NewDiscoverer() method. +func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { + return &metricRegistererImpl{ + reg: reg, + metrics: metrics, + } +} + +// RegisterMetrics registers the metrics with a Prometheus registerer. +// If any metric fails to register, it will unregister all metrics that +// were registered so far, and return an error. +// Typically called at the start of the SD's Run() method. +func (rh *metricRegistererImpl) RegisterMetrics() error { + for _, collector := range rh.metrics { + err := rh.reg.Register(collector) + if err != nil { + // Unregister all metrics that were registered so far. + // This is so that if RegisterMetrics() gets called again, + // there will not be an error due to a duplicate registration. + rh.UnregisterMetrics() + + return fmt.Errorf("failed to register metric: %w", err) + } + } + return nil +} + +// UnregisterMetrics unregisters the metrics from the same Prometheus +// registerer which was used to register them. +// Typically called at the end of the SD's Run() method by a defer statement. 
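The MetricRegisterer doc comments describe a register-at-start, unregister-by-defer bracket around an SD's Run method. A sketch with a hypothetical exampleDisco discoverer, assuming only the util.go interface added in this diff:

package examplesd

import (
	"context"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// exampleDisco is hypothetical; it shows the lifecycle bracket the
// doc comments prescribe: register at the start of Run, bail out on
// failure, and unregister via defer when Run returns.
type exampleDisco struct {
	logger           log.Logger
	metricRegisterer discovery.MetricRegisterer
}

func (d *exampleDisco) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	if err := d.metricRegisterer.RegisterMetrics(); err != nil {
		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
		return
	}
	defer d.metricRegisterer.UnregisterMetrics()

	<-ctx.Done() // placeholder for the real discovery loop
}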
+func (rh *metricRegistererImpl) UnregisterMetrics() { + for _, collector := range rh.metrics { + rh.reg.Unregister(collector) + } +} diff --git a/discovery/uyuni/metrics.go b/discovery/uyuni/metrics.go new file mode 100644 index 0000000000..6793b59206 --- /dev/null +++ b/discovery/uyuni/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package uyuni + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*uyuniMetrics)(nil) + +type uyuniMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *uyuniMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *uyuniMetrics) Unregister() {} diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index e37acbf98a..2ab3396951 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -20,11 +20,13 @@ import ( "net/http" "net/url" "path" + "strconv" "strings" "time" "github.com/go-kit/log" "github.com/kolo/xmlrpc" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -39,10 +41,10 @@ const ( uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_" uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname" uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn" - uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id" - uyuniLablelGroups = uyuniMetaLabelPrefix + "groups" - uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" - uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter" + uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id" + uyuniLabelGroups = uyuniMetaLabelPrefix + "groups" + uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" + uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter" uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module" uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path" uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme" @@ -110,12 +112,19 @@ type Discovery struct { logger log.Logger } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &uyuniMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "uyuni" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. 
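The metrics plumbing introduced above is the same for every SD mechanism: the caller builds the shared refresh metrics once per registry, derives the per-mechanism DiscovererMetrics from the config, registers them, and only then constructs the discoverer. A minimal sketch of that wiring, mirroring the test helpers in this diff (the wrapper function and the choice of the uyuni config type are illustrative assumptions, not part of the change):

```go
package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/uyuni"
)

// newDiscovererWithMetrics sketches the registration flow: shared refresh
// metrics first, then the config's own DiscovererMetrics, registered before
// the discoverer is created and unregistered again on failure.
func newDiscovererWithMetrics(cfg *uyuni.SDConfig, reg prometheus.Registerer) (discovery.Discoverer, discovery.DiscovererMetrics, error) {
	refreshMetrics := discovery.NewRefreshMetrics(reg)
	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
	if err := metrics.Register(); err != nil {
		return nil, nil, err
	}
	d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Metrics: metrics})
	if err != nil {
		metrics.Unregister() // roll back so a retry does not hit duplicate registration
		return nil, nil, err
	}
	// Callers unregister the metrics when tearing the discoverer down,
	// as the tests in this diff do.
	return d, metrics, nil
}
```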
@@ -146,7 +155,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Password == "" { return errors.New("Uyuni SD configuration requires a password") } - return nil + return c.HTTPClientConfig.Validate() } func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) { @@ -203,7 +212,12 @@ func getEndpointInfoForSystems( } // NewDiscovery returns a uyuni discovery for the given configuration. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*uyuniMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + apiURL, err := url.Parse(conf.Server) if err != nil { return nil, err @@ -227,10 +241,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "uyuni", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "uyuni", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } @@ -253,10 +270,10 @@ func (d *Discovery) getEndpointLabels( model.AddressLabel: model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), - uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)), - uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), - uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName), - uyuniLablelExporter: model.LabelValue(endpoint.ExporterName), + uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), + uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), + uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName), + uyuniLabelExporter: model.LabelValue(endpoint.ExporterName), uyuniLabelProxyModule: model.LabelValue(endpoint.Module), uyuniLabelMetricsPath: model.LabelValue(endpoint.Path), uyuniLabelScheme: model.LabelValue(scheme), diff --git a/discovery/uyuni/uyuni_test.go b/discovery/uyuni/uyuni_test.go index d045cde6d7..09be23e2b4 100644 --- a/discovery/uyuni/uyuni_test.go +++ b/discovery/uyuni/uyuni_test.go @@ -23,6 +23,9 @@ import ( "github.com/stretchr/testify/require" + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -35,7 +38,17 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err Server: ts.URL, } - md, err := NewDiscovery(&conf, nil) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + err := metrics.Register() + if err != nil { + return nil, err + } + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + md, err := NewDiscovery(&conf, nil, metrics) if err != nil { return nil, err } @@ -55,7 +68,7 @@ func TestUyuniSDHandleError(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestUyuniSDLogin(t *testing.T) { @@ -87,7 +100,7 @@ func TestUyuniSDLogin(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + 
require.Empty(t, tgs) } func TestUyuniSDSkipLogin(t *testing.T) { @@ -108,7 +121,14 @@ func TestUyuniSDSkipLogin(t *testing.T) { Server: ts.URL, } - md, err := NewDiscovery(&conf, nil) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + md, err := NewDiscovery(&conf, nil, metrics) if err != nil { t.Error(err) } @@ -119,5 +139,5 @@ func TestUyuniSDSkipLogin(t *testing.T) { tgs, err := md.refresh(context.Background()) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } diff --git a/discovery/vultr/metrics.go b/discovery/vultr/metrics.go new file mode 100644 index 0000000000..193436aad2 --- /dev/null +++ b/discovery/vultr/metrics.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vultr + +import ( + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*vultrMetrics)(nil) + +type vultrMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator +} + +// Register implements discovery.DiscovererMetrics. +func (m *vultrMetrics) Register() error { + return nil +} + +// Unregister implements discovery.DiscovererMetrics. +func (m *vultrMetrics) Unregister() {} diff --git a/discovery/vultr/mock_test.go b/discovery/vultr/mock_test.go index 417866fcef..bfc24d06fb 100644 --- a/discovery/vultr/mock_test.go +++ b/discovery/vultr/mock_test.go @@ -20,7 +20,7 @@ import ( "testing" ) -// SDMock is the interface for the Vultr mock +// SDMock is the interface for the Vultr mock. type SDMock struct { t *testing.T Server *httptest.Server diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 42881d3c19..aaa9c64e47 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -73,12 +74,19 @@ type SDConfig struct { Port int `yaml:"port"` } +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &vultrMetrics{ + refreshMetrics: rmi, + } +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "vultr" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Metrics) } // SetDirectory joins any relative file paths with dir. @@ -106,7 +114,12 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
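The constructor change below repeats, mechanism by mechanism, the migration already applied to uyuni above: refresh.NewDiscovery no longer takes positional arguments but a refresh.Options struct that also carries the refresh-metrics instantiator. A self-contained sketch of the new shape (the "example" mechanism name and wrapper type are hypothetical):

```go
package example

import (
	"context"
	"time"

	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// exampleDiscovery stands in for any refresh-based SD (vultr, uyuni, ...).
type exampleDiscovery struct {
	*refresh.Discovery
}

// newExampleDiscovery shows the shape of the migration: the old positional
// call refresh.NewDiscovery(logger, mech, interval, refreshF) becomes an
// options struct that additionally threads the per-mechanism
// refresh-metrics instantiator into the shared refresh loop.
func newExampleDiscovery(logger log.Logger, interval time.Duration,
	rmi discovery.RefreshMetricsInstantiator,
	refreshF func(ctx context.Context) ([]*targetgroup.Group, error),
) *exampleDiscovery {
	d := &exampleDiscovery{}
	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
			Logger:              logger,
			Mech:                "example", // "vultr", "uyuni", ... in the real SDs
			Interval:            interval,
			RefreshF:            refreshF,
			MetricsInstantiator: rmi,
		},
	)
	return d
}
```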
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { + m, ok := metrics.(*vultrMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + d := &Discovery{ port: conf.Port, } @@ -128,10 +141,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "vultr", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "vultr", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + MetricsInstantiator: m.refreshMetrics, + }, ) return d, nil } diff --git a/discovery/vultr/vultr_test.go b/discovery/vultr/vultr_test.go index b729541531..2f12a35529 100644 --- a/discovery/vultr/vultr_test.go +++ b/discovery/vultr/vultr_test.go @@ -20,8 +20,11 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery" ) type VultrSDTestSuite struct { @@ -46,7 +49,15 @@ func TestVultrSDRefresh(t *testing.T) { cfg := DefaultSDConfig cfg.HTTPClientConfig.BearerToken = APIKey - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + + d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdMock.Mock.Endpoint()) require.NoError(t, err) @@ -56,12 +67,12 @@ func TestVultrSDRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 3, len(tg.Targets)) + require.Len(t, tg.Targets, 3) for i, k := range []model.LabelSet{ { diff --git a/discovery/xds/client.go b/discovery/xds/client.go index 9844c6d7ed..027ceb2715 100644 --- a/discovery/xds/client.go +++ b/discovery/xds/client.go @@ -179,7 +179,7 @@ func (rc *HTTPResourceClient) Fetch(ctx context.Context) (*v3.DiscoveryResponse, return nil, err } - request, err := http.NewRequest("POST", rc.endpoint, bytes.NewBuffer(reqBody)) + request, err := http.NewRequest(http.MethodPost, rc.endpoint, bytes.NewBuffer(reqBody)) if err != nil { return nil, err } diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index ff5217359c..b699995fb7 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -53,14 +53,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) { require.Empty(t, endpointURL) require.Error(t, err) - require.Equal(t, err.Error(), "invalid xDS server URL") + require.Equal(t, "invalid xDS server URL", err.Error()) } func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.NotNil(t, err) + require.Error(t, err) require.Contains(t, err.Error(), "must be either 'http' or 'https'") } @@ -68,7 +68,7 @@ func TestMakeXDSResourceHttpEndpoint(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("http://127.0.0.1:5000"), 
"monitoring") require.NoError(t, err) - require.Equal(t, endpointURL.String(), "http://127.0.0.1:5000/v3/discovery:monitoring") + require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring", endpointURL.String()) } func TestCreateNewHTTPResourceClient(t *testing.T) { @@ -89,8 +89,8 @@ func TestCreateNewHTTPResourceClient(t *testing.T) { require.NoError(t, err) - require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1") - require.Equal(t, client.client.Timeout, 1*time.Minute) + require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1", client.endpoint) + require.Equal(t, 1*time.Minute, client.client.Timeout) } func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) { @@ -138,7 +138,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) { require.NotNil(t, res) require.Equal(t, client.ResourceTypeURL(), res.TypeUrl) - require.Len(t, res.Resources, 0) + require.Empty(t, res.Resources) require.Equal(t, "abc", client.latestNonce, "Nonce not cached") require.Equal(t, "1", client.latestVersion, "Version not cached") diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index bc88ba5540..d1d540aaf4 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -30,35 +30,12 @@ import ( "github.com/prometheus/prometheus/util/strutil" ) -var ( - // DefaultKumaSDConfig is the default Kuma MADS SD configuration. - DefaultKumaSDConfig = KumaSDConfig{ - HTTPClientConfig: config.DefaultHTTPClientConfig, - RefreshInterval: model.Duration(15 * time.Second), - FetchTimeout: model.Duration(2 * time.Minute), - } - - kumaFetchFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_kuma_fetch_failures_total", - Help: "The number of Kuma MADS fetch call failures.", - }) - kumaFetchSkipUpdateCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_kuma_fetch_skipped_updates_total", - Help: "The number of Kuma MADS fetch calls that result in no updates to the targets.", - }) - kumaFetchDuration = prometheus.NewSummary( - prometheus.SummaryOpts{ - Namespace: namespace, - Name: "sd_kuma_fetch_duration_seconds", - Help: "The duration of a Kuma MADS fetch call.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - ) -) +// DefaultKumaSDConfig is the default Kuma MADS SD configuration. +var DefaultKumaSDConfig = KumaSDConfig{ + HTTPClientConfig: config.DefaultHTTPClientConfig, + RefreshInterval: model.Duration(15 * time.Second), + FetchTimeout: model.Duration(2 * time.Minute), +} const ( // kumaMetaLabelPrefix is the meta prefix used for all kuma meta labels. @@ -81,6 +58,11 @@ const ( type KumaSDConfig = SDConfig +// NewDiscovererMetrics implements discovery.Config. +func (*KumaSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return newDiscovererMetrics(reg, rmi) +} + // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultKumaSDConfig @@ -120,7 +102,7 @@ func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discover logger = log.NewNopLogger() } - return NewKumaHTTPDiscovery(c, logger) + return NewKumaHTTPDiscovery(c, logger, opts.Metrics) } func convertKumaV1MonitoringAssignment(assignment *MonitoringAssignment) []model.LabelSet { @@ -176,12 +158,21 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L return targets, nil } -func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger) (discovery.Discoverer, error) { +func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { + m, ok := metrics.(*xdsMetrics) + if !ok { + return nil, fmt.Errorf("invalid discovery metrics type") + } + // Default to "prometheus" if hostname is unavailable. - clientID, err := osutil.GetFQDN() - if err != nil { - level.Debug(logger).Log("msg", "error getting FQDN", "err", err) - clientID = "prometheus" + clientID := conf.ClientID + if clientID == "" { + var err error + clientID, err = osutil.GetFQDN() + if err != nil { + level.Debug(logger).Log("msg", "error getting FQDN", "err", err) + clientID = "prometheus" + } } clientConfig := &HTTPResourceClientConfig{ @@ -203,14 +194,12 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger) (discovery.Disc } d := &fetchDiscovery{ - client: client, - logger: logger, - refreshInterval: time.Duration(conf.RefreshInterval), - source: "kuma", - parseResources: kumaMadsV1ResourceParser, - fetchFailuresCount: kumaFetchFailuresCount, - fetchSkipUpdateCount: kumaFetchSkipUpdateCount, - fetchDuration: kumaFetchDuration, + client: client, + logger: logger, + refreshInterval: time.Duration(conf.RefreshInterval), + source: "kuma", + parseResources: kumaMadsV1ResourceParser, + metrics: m, } return d, nil diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index 1db0a0831d..cfb9cbac50 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -21,12 +21,14 @@ import ( "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -107,7 +109,16 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis } func newKumaTestHTTPDiscovery(c KumaSDConfig) (*fetchDiscovery, error) { - kd, err := NewKumaHTTPDiscovery(&c, nopLogger) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + // TODO(ptodev): Add the ability to unregister refresh metrics. 
+ metrics := c.NewDiscovererMetrics(reg, refreshMetrics) + err := metrics.Register() + if err != nil { + return nil, err + } + + kd, err := NewKumaHTTPDiscovery(&c, nopLogger, metrics) if err != nil { return nil, err } @@ -129,7 +140,7 @@ func TestKumaMadsV1ResourceParserInvalidTypeURL(t *testing.T) { func TestKumaMadsV1ResourceParserEmptySlice(t *testing.T) { resources := make([]*anypb.Any, 0) groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL) - require.Len(t, groups, 0) + require.Empty(t, groups) require.NoError(t, err) } @@ -204,8 +215,10 @@ func TestNewKumaHTTPDiscovery(t *testing.T) { require.True(t, ok) require.Equal(t, kumaConf.Server, resClient.Server()) require.Equal(t, KumaMadsV1ResourceTypeURL, resClient.ResourceTypeURL()) - require.NotEmpty(t, resClient.ID()) + require.Equal(t, kumaConf.ClientID, resClient.ID()) require.Equal(t, KumaMadsV1ResourceType, resClient.config.ResourceType) + + kd.metrics.Unregister() } func TestKumaHTTPDiscoveryRefresh(t *testing.T) { @@ -300,4 +313,6 @@ tls_config: case <-ch: require.Fail(t, "no update expected") } + + kd.metrics.Unregister() } diff --git a/discovery/xds/metrics.go b/discovery/xds/metrics.go new file mode 100644 index 0000000000..597d516566 --- /dev/null +++ b/discovery/xds/metrics.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xds + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" +) + +var _ discovery.DiscovererMetrics = (*xdsMetrics)(nil) + +type xdsMetrics struct { + fetchDuration prometheus.Summary + fetchSkipUpdateCount prometheus.Counter + fetchFailuresCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer +} + +func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + m := &xdsMetrics{ + fetchFailuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_kuma_fetch_failures_total", + Help: "The number of Kuma MADS fetch call failures.", + }), + fetchSkipUpdateCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_kuma_fetch_skipped_updates_total", + Help: "The number of Kuma MADS fetch calls that result in no updates to the targets.", + }), + fetchDuration: prometheus.NewSummary( + prometheus.SummaryOpts{ + Namespace: namespace, + Name: "sd_kuma_fetch_duration_seconds", + Help: "The duration of a Kuma MADS fetch call.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + ), + } + + m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + m.fetchFailuresCount, + m.fetchSkipUpdateCount, + m.fetchDuration, + }) + + return m +} + +// Register implements discovery.DiscovererMetrics. +func (m *xdsMetrics) Register() error { + return m.metricRegisterer.RegisterMetrics() +} + +// Unregister implements discovery.DiscovererMetrics. 
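// For illustration only (not part of this file): implementations built on
// discovery.NewMetricRegisterer, like xdsMetrics here, get transactional
// registration. If one collector fails to register, the collectors that were
// already registered are unregistered again, so a later Register() retry
// does not fail with duplicate-registration errors. A hypothetical caller
// looks like:
//
//	reg := prometheus.NewRegistry()
//	rmi := discovery.NewRefreshMetrics(reg)
//	m := newDiscovererMetrics(reg, rmi)
//	if err := m.Register(); err != nil {
//		// partial registrations were already rolled back
//	}
//	defer m.Unregister()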
+func (m *xdsMetrics) Unregister() { + m.metricRegisterer.UnregisterMetrics() +} diff --git a/discovery/xds/xds.go b/discovery/xds/xds.go index 48bdbab02b..8191d6be1a 100644 --- a/discovery/xds/xds.go +++ b/discovery/xds/xds.go @@ -20,7 +20,6 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "google.golang.org/protobuf/encoding/protojson" @@ -55,6 +54,7 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` FetchTimeout model.Duration `yaml:"fetch_timeout,omitempty"` Server string `yaml:"server,omitempty"` + ClientID string `yaml:"client_id,omitempty"` } // mustRegisterMessage registers the provided message type in the typeRegistry, and panics @@ -69,9 +69,6 @@ func init() { // Register top-level SD Configs. discovery.RegisterConfig(&KumaSDConfig{}) - // Register metrics. - prometheus.MustRegister(kumaFetchDuration, kumaFetchSkipUpdateCount, kumaFetchFailuresCount) - // Register protobuf types that need to be marshalled/ unmarshalled. mustRegisterMessage(protoTypes, (&v3.DiscoveryRequest{}).ProtoReflect().Type()) mustRegisterMessage(protoTypes, (&v3.DiscoveryResponse{}).ProtoReflect().Type()) @@ -109,9 +106,7 @@ type fetchDiscovery struct { parseResources resourceParser logger log.Logger - fetchDuration prometheus.Observer - fetchSkipUpdateCount prometheus.Counter - fetchFailuresCount prometheus.Counter + metrics *xdsMetrics } func (d *fetchDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { @@ -135,7 +130,7 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou t0 := time.Now() response, err := d.client.Fetch(ctx) elapsed := time.Since(t0) - d.fetchDuration.Observe(elapsed.Seconds()) + d.metrics.fetchDuration.Observe(elapsed.Seconds()) // Check the context before in order to exit early. select { @@ -146,20 +141,20 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou if err != nil { level.Error(d.logger).Log("msg", "error parsing resources", "err", err) - d.fetchFailuresCount.Inc() + d.metrics.fetchFailuresCount.Inc() return } if response == nil { // No update needed. 
- d.fetchSkipUpdateCount.Inc() + d.metrics.fetchSkipUpdateCount.Inc() return } parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl) if err != nil { level.Error(d.logger).Log("msg", "error parsing resources", "err", err) - d.fetchFailuresCount.Inc() + d.metrics.fetchFailuresCount.Inc() return } diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go index 974a47342f..7cce021c5f 100644 --- a/discovery/xds/xds_test.go +++ b/discovery/xds/xds_test.go @@ -29,23 +29,15 @@ import ( "go.uber.org/goleak" "google.golang.org/protobuf/types/known/anypb" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - sdConf = SDConfig{ - Server: "http://127.0.0.1", - RefreshInterval: model.Duration(10 * time.Second), - } - - testFetchFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{}) - testFetchSkipUpdateCount = prometheus.NewCounter( - prometheus.CounterOpts{}) - testFetchDuration = prometheus.NewSummary( - prometheus.SummaryOpts{}, - ) -) +var sdConf = SDConfig{ + Server: "http://127.0.0.1", + RefreshInterval: model.Duration(10 * time.Second), + ClientID: "test-id", +} func TestMain(m *testing.M) { goleak.VerifyTestMain(m) @@ -132,12 +124,22 @@ func TestPollingRefreshSkipUpdate(t *testing.T) { return nil, nil }, } + + cfg := &SDConfig{} + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + xdsMetrics, ok := metrics.(*xdsMetrics) + if !ok { + require.Fail(t, "invalid discovery metrics type") + } + pd := &fetchDiscovery{ - client: rc, - logger: nopLogger, - fetchDuration: testFetchDuration, - fetchFailuresCount: testFetchFailuresCount, - fetchSkipUpdateCount: testFetchSkipUpdateCount, + client: rc, + logger: nopLogger, + metrics: xdsMetrics, } ctx, cancel := context.WithCancel(context.Background()) @@ -154,6 +156,9 @@ func TestPollingRefreshSkipUpdate(t *testing.T) { case <-ch: require.Fail(t, "no update expected") } + + metrics.Unregister() + refreshMetrics.Unregister() } func TestPollingRefreshAttachesGroupMetadata(t *testing.T) { @@ -166,13 +171,18 @@ func TestPollingRefreshAttachesGroupMetadata(t *testing.T) { return &v3.DiscoveryResponse{}, nil }, } + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := newDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + xdsMetrics, ok := metrics.(*xdsMetrics) + require.True(t, ok) + pd := &fetchDiscovery{ - source: source, - client: rc, - logger: nopLogger, - fetchDuration: testFetchDuration, - fetchFailuresCount: testFetchFailuresCount, - fetchSkipUpdateCount: testFetchSkipUpdateCount, + source: source, + client: rc, + logger: nopLogger, parseResources: constantResourceParser([]model.LabelSet{ { "__meta_custom_xds_label": "a-value", @@ -185,6 +195,7 @@ func TestPollingRefreshAttachesGroupMetadata(t *testing.T) { "instance": "prometheus-02", }, }, nil), + metrics: xdsMetrics, } ch := make(chan []*targetgroup.Group, 1) pd.poll(context.Background(), ch) @@ -201,6 +212,9 @@ func TestPollingRefreshAttachesGroupMetadata(t *testing.T) { target2 := group.Targets[1] require.Contains(t, target2, model.LabelName("__meta_custom_xds_label")) require.Equal(t, model.LabelValue("a-value"), target2["__meta_custom_xds_label"]) + + metrics.Unregister() + refreshMetrics.Unregister() } func TestPollingDisappearingTargets(t *testing.T) { @@ -242,14 +256,23 @@ func 
TestPollingDisappearingTargets(t *testing.T) { }, nil } + cfg := &SDConfig{} + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + + xdsMetrics, ok := metrics.(*xdsMetrics) + if !ok { + require.Fail(t, "invalid discovery metrics type") + } + pd := &fetchDiscovery{ - source: source, - client: rc, - logger: nopLogger, - fetchDuration: testFetchDuration, - fetchFailuresCount: testFetchFailuresCount, - fetchSkipUpdateCount: testFetchSkipUpdateCount, - parseResources: parser, + source: source, + client: rc, + logger: nopLogger, + parseResources: parser, + metrics: xdsMetrics, } ch := make(chan []*targetgroup.Group, 1) @@ -270,4 +293,7 @@ func TestPollingDisappearingTargets(t *testing.T) { require.Equal(t, source, groups[0].Source) require.Len(t, groups[0].Targets, 1) + + metrics.Unregister() + refreshMetrics.Unregister() } diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index cadff5fd2e..92904dd71c 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -25,6 +25,7 @@ import ( "github.com/go-kit/log" "github.com/go-zookeeper/zk" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery" @@ -56,6 +57,11 @@ type ServersetSDConfig struct { Timeout model.Duration `yaml:"timeout,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. +func (*ServersetSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &discovery.NoopDiscovererMetrics{} +} + // Name returns the name of the Config. func (*ServersetSDConfig) Name() string { return "serverset" } @@ -93,6 +99,11 @@ type NerveSDConfig struct { Timeout model.Duration `yaml:"timeout,omitempty"` } +// NewDiscovererMetrics implements discovery.Config. +func (*NerveSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &discovery.NoopDiscovererMetrics{} +} + // Name returns the name of the Config. func (*NerveSDConfig) Name() string { return "nerve" } @@ -193,7 +204,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } for _, pathUpdate := range d.pathUpdates { // Drain event channel in case the treecache leaks goroutines otherwise. 
- for range pathUpdate { // nolint:revive + for range pathUpdate { } } d.conn.Close() @@ -269,18 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) { labels := model.LabelSet{} labels[serversetPathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port))) + net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port))) labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host) - labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port)) + labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port)) for name, endpoint := range member.AdditionalEndpoints { cleanName := model.LabelName(strutil.SanitizeLabelName(name)) labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue( endpoint.Host) labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue( - fmt.Sprintf("%d", endpoint.Port)) - + strconv.Itoa(endpoint.Port)) } labels[serversetStatusLabel] = model.LabelValue(member.Status) @@ -311,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) { labels := model.LabelSet{} labels[nervePathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port))) + net.JoinHostPort(member.Host, strconv.Itoa(member.Port))) labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host) - labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port)) + labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port)) labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name) return labels, nil diff --git a/discovery/zookeeper/zookeeper_test.go b/discovery/zookeeper/zookeeper_test.go index d0e67b50ac..c2b41ce7a3 100644 --- a/discovery/zookeeper/zookeeper_test.go +++ b/discovery/zookeeper/zookeeper_test.go @@ -18,6 +18,7 @@ import ( "time" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" "go.uber.org/goleak" ) @@ -31,7 +32,5 @@ func TestNewDiscoveryError(t *testing.T) { time.Second, []string{"/"}, nil, func(data []byte, path string) (model.LabelSet, error) { return nil, nil }) - if err == nil { - t.Fatalf("expected error, got nil") - } + require.Error(t, err) } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 78ec205f24..a179a2f9f1 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -15,23 +15,27 @@ The Prometheus monitoring server | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | | --version | Show application version. | | | --config.file | Prometheus configuration file path. | `prometheus.yml` | -| --web.listen-address | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` | +| --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | +| --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. 
| `0.0.0.0:9090` | +| --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | -| --web.max-connections | Maximum number of simultaneous connections. | `512` | +| --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` | +| --web.max-notifications-subscribers | Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close. | `16` | | --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | --web.user-assets | Path to static asset directory, available at /user. | | | --web.enable-lifecycle | Enable shutdown and reload via HTTP request. | `false` | | --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` | | --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` | +| --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` | +| --web.enable-otlp-receiver | Enable API endpoint accepting OTLP write requests. | `false` | | --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` | | --web.console.libraries | Path to the console library directory. | `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | -| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` | +| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` | | --storage.tsdb.path | Base path for metrics storage. Use with server mode only. | `data/` | -| --storage.tsdb.retention | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | | -| --storage.tsdb.retention.time | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | +| --storage.tsdb.retention.time | How long to retain samples in storage. If neither this flag nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | | --storage.tsdb.retention.size | Maximum number of bytes that can be stored for blocks. 
A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | | | --storage.tsdb.no-lockfile | Do not create lockfile in data directory. Use with server mode only. | `false` | | --storage.tsdb.head-chunks-write-queue-size | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` | @@ -47,12 +51,15 @@ The Prometheus monitoring server | --rules.alert.for-outage-tolerance | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` | | --rules.alert.for-grace-period | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` | | --rules.alert.resend-delay | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` | +| --rules.max-concurrent-evals | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` | | --alertmanager.notification-queue-capacity | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` | +| --alertmanager.drain-notification-queue-on-shutdown | Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down. Use with server mode only. | `true` | | --query.lookback-delta | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` | | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. 
One of: [logfmt, json] | `logfmt` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 546b200e27..996a996555 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system. | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | | --experimental | Enable experimental commands. | -| --enable-feature | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | +| --enable-feature ... | Comma separated feature names to enable. Currently unused. | @@ -281,7 +281,7 @@ Run series query. | Flag | Description | | --- | --- | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | @@ -309,7 +309,7 @@ Run labels query. | --- | --- | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | @@ -324,6 +324,25 @@ Run labels query. +##### `promtool query analyze` + +Run queries against your Prometheus to analyze the usage pattern of certain metrics. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --server | Prometheus server to query. | | +| --type | Type of metric: histogram. | | +| --duration | Time frame to analyze. | `1h` | +| --time | Query time (RFC3339 or Unix timestamp), defaults to now. | | +| --match ... | Series selector. Can be specified multiple times. | | + + + + ### `promtool debug` Fetch debug information. @@ -423,12 +442,31 @@ Unit testing. +#### Flags + +| Flag | Description | +| --- | --- | +| --junit | File path to store JUnit XML test results. | + + + + ##### `promtool test rules` Unit tests for rules. +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | +| --diff | [Experimental] Print colored differential output between expected & received output. | `false` | + + + + ###### Arguments | Argument | Description | Required | @@ -488,6 +526,7 @@ Analyze churn, label pair cardinality and compaction efficiency. | --- | --- | --- | | --limit | How many items to show in each list. | `20` | | --extended | Run extended analysis. | | +| --match | Series selector to analyze. Only 1 set of matchers is supported now. | | @@ -536,9 +575,37 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | +| --sandbox-dir-root | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | -| --match | Series selector. | `{__name__=~'(?s:.*)'}` | +| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | + + + + +###### Arguments + +| Argument | Description | Default | +| --- | --- | --- | +| db path | Database path (default is data/). 
| `data/` | + + + + +##### `promtool tsdb dump-openmetrics` + +[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --sandbox-dir-root | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | +| --min-time | Minimum timestamp to dump. | `-9223372036854775808` | +| --max-time | Maximum timestamp to dump. | `9223372036854775807` | +| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -574,6 +641,15 @@ Import samples from OpenMetrics input and produce TSDB blocks. Please refer to t +###### Flags + +| Flag | Description | +| --- | --- | +| --label | Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value | + + + + ###### Arguments | Argument | Description | Default | Required | diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 6691902579..096809397a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -37,7 +37,7 @@ Generic placeholders are defined as follows: * `<float>`: a floating-point number * `<host>`: a valid string consisting of a hostname or IP followed by an optional port number * `<int>`: an integer value -* `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*` +* `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`. Any other unsupported character in the source label should be converted to an underscore. For example, the label `app.kubernetes.io/name` should be written as `app_kubernetes_io_name`. * `<labelvalue>`: a string of unicode characters * `<path>`: a valid URL path * `<scheme>`: a string that can take the values `http` or `https` @@ -61,9 +61,23 @@ global: # How long until a scrape request times out. [ scrape_timeout: | default = 10s ] + # The protocols to negotiate during a scrape with the client. + # Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, + # OpenMetricsText1.0.0, PrometheusText0.0.4. + # The default value changes to [ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ] + # when native_histogram feature flag is set. + [ scrape_protocols: [, ...] | default = [ OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ] ] + # How frequently to evaluate rules. [ evaluation_interval: | default = 1m ] + # Offset the rule evaluation timestamp of this particular group by the + # specified duration into the past to ensure the underlying metrics have + # been received. Metric availability delays are more likely to occur when + # Prometheus is running as a remote write target, but can also occur when + # there are anomalies with scraping. + [ rule_query_offset: | default = 0s ] + # The labels to add to any time series or alerts when communicating with # external systems (federation, remote storage, Alertmanager). external_labels: @@ -73,39 +87,60 @@ global: # Reloading the configuration will reopen the file. [ query_log_file: ] + # File to which scrape failures are logged. + # Reloading the configuration will reopen the file. + [ scrape_failure_log_file: ] + # An uncompressed response body larger than this many bytes will cause the # scrape to fail. 0 means no limit. Example: 100MB.
# This is an experimental feature, this behaviour could # change or be removed in the future. [ body_size_limit: | default = 0 ] - # Per-scrape limit on number of scraped samples that will be accepted. + # Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] - # Per-scrape limit on number of labels that will be accepted for a sample. If - # more than this number of labels are present post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the number of labels that will be accepted per sample. If more + # than this number of labels are present on any sample post metric-relabeling, + # the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] - # Per-scrape limit on length of labels name that will be accepted for a sample. - # If a label name is longer than this number post metric-relabeling, the entire - # scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label name. If any label + # name in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label names are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] - # Per-scrape limit on length of labels value that will be accepted for a sample. - # If a label value is longer than this number post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label value. If any label + # value in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label values are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] - # Per-scrape config limit on number of unique targets that will be + # Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] + # Limit per scrape config on the number of targets dropped by relabeling + # that will be kept in memory. 0 means no limit. + [ keep_dropped_targets: | default = 0 ] + + # Specifies the validation scheme for metric and label names. Either blank or + # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, + # and underscores. + [ metric_name_validation_scheme | default "utf8" ] + +runtime: + # Configure the Go garbage collector GOGC parameter + # See: https://tip.golang.org/doc/gc-guide#GOGC + # Lowering this number increases CPU usage. + [ gogc: | default = 75 ] + # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. rule_files: @@ -131,6 +166,10 @@ alerting: remote_write: [ - ... ] +# Settings related to the OTLP receiver feature. +otlp: + [ promote_resource_attributes: [, ...] | default = [ ] ] + # Settings related to the remote read feature. remote_read: [ - ... ] @@ -167,6 +206,11 @@ job_name: # Per-scrape timeout when scraping this job.
[ scrape_timeout: | default = ] +# The protocols to negotiate during a scrape with the client. +# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, +# OpenMetricsText1.0.0, PrometheusText0.0.4. +[ scrape_protocols: [, ...] | default = ] + # Whether to scrape a classic histogram that is also exposed as a native # histogram (has no effect without --enable-feature=native-histograms). [ scrape_classic_histograms: | default = false ] @@ -206,6 +250,14 @@ job_name: # by the target will be ignored. [ honor_timestamps: | default = true ] +# track_timestamps_staleness controls whether Prometheus tracks staleness of +# the metrics that have an explicit timestamp present in scraped data. +# +# If track_timestamps_staleness is set to "true", a staleness marker will be +# inserted in the TSDB when a metric is no longer present or the target +# is down. +[ track_timestamps_staleness: | default = false ] + # Configures the protocol scheme used for requests. [ scheme: | default = http ] @@ -213,11 +265,17 @@ job_name: params: [ : [, ...] ] +# If enable_compression is set to "false", Prometheus will request an uncompressed +# response from the scraped target. +[ enable_compression: | default = true ] + # Sets the `Authorization` header on every scrape request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -260,6 +318,21 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Header values. Hidden from the configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + +# File to which scrape failures are logged. +# Reloading the configuration will reopen the file. +[ scrape_failure_log_file: ] # List of Azure service discovery configurations. azure_sd_configs: @@ -388,37 +461,90 @@ metric_relabel_configs: # change or be removed in the future. [ body_size_limit: | default = 0 ] -# Per-scrape limit on number of scraped samples that will be accepted. +# Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] -# Per-scrape limit on number of labels that will be accepted for a sample. If -# more than this number of labels are present post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the number of labels that will be accepted per sample. If more +# than this number of labels are present on any sample post metric-relabeling, +# the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] -# Per-scrape limit on length of labels name that will be accepted for a sample. -# If a label name is longer than this number post metric-relabeling, the entire -# scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label name. If any label +# name in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label names are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit.
[ label_name_length_limit: | default = 0 ] -# Per-scrape limit on length of labels value that will be accepted for a sample. -# If a label value is longer than this number post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label value. If any label +# value in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label values are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] -# Per-scrape config limit on number of unique targets that will be +# Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] +# Limit per scrape config on the number of targets dropped by relabeling +# that will be kept in memory. 0 means no limit. +[ keep_dropped_targets: | default = 0 ] + +# Specifies the validation scheme for metric and label names. Either blank or +# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and +# underscores. +[ metric_name_validation_scheme | default "utf8" ] + # Limit on total number of positive and negative buckets allowed in a single -# native histogram. If this is exceeded, the entire scrape will be treated as -# failed. 0 means no limit. +# native histogram. The resolution of a histogram with more buckets will be +# reduced until the number of buckets is within the limit. If the limit cannot +# be reached, the scrape will fail. +# 0 means no limit. [ native_histogram_bucket_limit: | default = 0 ] + +# Lower limit for the growth factor of one bucket to the next in each native +# histogram. The resolution of a histogram with a lower growth factor will be +# reduced as much as possible until it is within the limit. +# To set an upper limit for the schema (equivalent to "scale" in OTel's +# exponential histograms), use the following factor limits: +# +# +----------------------------+----------------------------+ +# | growth factor | resulting schema AKA scale | +# +----------------------------+----------------------------+ +# | 65536 | -4 | +# +----------------------------+----------------------------+ +# | 256 | -3 | +# +----------------------------+----------------------------+ +# | 16 | -2 | +# +----------------------------+----------------------------+ +# | 4 | -1 | +# +----------------------------+----------------------------+ +# | 2 | 0 | +# +----------------------------+----------------------------+ +# | 1.4 | 1 | +# +----------------------------+----------------------------+ +# | 1.1 | 2 | +# +----------------------------+----------------------------+ +# | 1.09 | 3 | +# +----------------------------+----------------------------+ +# | 1.04 | 4 | +# +----------------------------+----------------------------+ +# | 1.02 | 5 | +# +----------------------------+----------------------------+ +# | 1.01 | 6 | +# +----------------------------+----------------------------+ +# | 1.005 | 7 | +# +----------------------------+----------------------------+ +# | 1.002 | 8 | +# +----------------------------+----------------------------+ +# +# 0 results in the smallest supported factor (which is currently ~1.0027 or +# schema 8, but might change in the future). 
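The factor/schema table above is rounded for readability; the exact growth factor for schema s is 2^(2^-s) (so the 1.1 row corresponds to an exact factor of about 1.189 for schema 2, and schema 8 gives the ~1.0027 mentioned above). The native_histogram_min_bucket_factor setting that follows then effectively selects the largest schema whose factor is still at or above the configured minimum. A standalone sketch (hypothetical helper, not part of Prometheus) that reproduces the table:

```go
package main

import (
	"fmt"
	"math"
)

// bucketFactor returns the exact growth factor between consecutive native
// histogram buckets for a given schema ("scale"): 2^(2^-schema).
func bucketFactor(schema int) float64 {
	return math.Pow(2, math.Pow(2, -float64(schema)))
}

// maxSchemaForMinFactor returns the highest-resolution schema whose bucket
// growth factor is still at least minFactor (a sketch of the reduction rule
// described above; 8 is the highest schema currently supported).
func maxSchemaForMinFactor(minFactor float64) int {
	schema := 8
	for schema > -4 && bucketFactor(schema) < minFactor {
		schema--
	}
	return schema
}

func main() {
	for s := -4; s <= 8; s++ {
		fmt.Printf("schema %2d -> factor %g\n", s, bucketFactor(s))
	}
	fmt.Println(maxSchemaForMinFactor(1.1)) // 2, matching the table row for 1.1
}
```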
+[ native_histogram_min_bucket_factor: | default = 0 ] ``` Where `` must be unique across all scrape configurations. @@ -499,6 +625,18 @@ tls_config: # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] ``` ### `` @@ -528,8 +666,10 @@ See below for the configuration options for Azure discovery: # The Azure environment. [ environment: | default = AzurePublicCloud ] -# The authentication method, either OAuth or ManagedIdentity. +# The authentication method, either OAuth, ManagedIdentity or SDK. # See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview +# SDK authentication method uses environment variables by default. +# See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication [ authentication_method: | default = OAuth] # The subscription ID. Always required. subscription_id: @@ -553,11 +693,13 @@ subscription_id: # Authentication information used to authenticate to the Azure API. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information, currently not support by Azure. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -588,6 +730,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -666,11 +820,13 @@ tags: # Authentication information used to authenticate to the consul server. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -701,6 +857,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -752,11 +920,13 @@ The following meta labels are available on targets during [relabeling](#relabel_ # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. 
# Optional HTTP basic authentication information, not currently supported by DigitalOcean. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -788,6 +958,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -816,12 +998,12 @@ Available meta labels: * `__meta_docker_container_id`: the id of the container * `__meta_docker_container_name`: the name of the container * `__meta_docker_container_network_mode`: the network mode of the container -* `__meta_docker_container_label_`: each label of the container +* `__meta_docker_container_label_`: each label of the container, with any unsupported characters converted to an underscore * `__meta_docker_network_id`: the ID of the network * `__meta_docker_network_name`: the name of the network * `__meta_docker_network_ingress`: whether the network is ingress * `__meta_docker_network_internal`: whether the network is internal -* `__meta_docker_network_label_`: each label of the network +* `__meta_docker_network_label_`: each label of the network, with any unsupported characters converted to an underscore * `__meta_docker_network_scope`: the scope of the network * `__meta_docker_network_ip`: the IP of the container in this network * `__meta_docker_port_private`: the port on the container @@ -846,6 +1028,18 @@ host: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -857,6 +1051,11 @@ tls_config: # The host to use if the container is in host networking mode. [ host_networking_host: | default = "localhost" ] +# Sort all non-nil networks in ascending order based on network name and +# get the first network if the container has multiple networks defined, +# thus avoiding collecting duplicate targets. +[ match_first_network: | default = true ] + # Optional filters to limit the discovery process to a subset of available # resources. # The available filters are listed in the upstream documentation: @@ -871,11 +1070,13 @@ tls_config: # Authentication information used to authenticate to the Docker daemon. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. 
basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -932,7 +1133,7 @@ Available meta labels: * `__meta_dockerswarm_service_mode`: the mode of the service * `__meta_dockerswarm_service_endpoint_port_name`: the name of the endpoint port, if available * `__meta_dockerswarm_service_endpoint_port_publish_mode`: the publish mode of the endpoint port -* `__meta_dockerswarm_service_label_`: each label of the service +* `__meta_dockerswarm_service_label_`: each label of the service, with any unsupported characters converted to an underscore * `__meta_dockerswarm_service_task_container_hostname`: the container hostname of the target, if available * `__meta_dockerswarm_service_task_container_image`: the container image of the target * `__meta_dockerswarm_service_updating_status`: the status of the service, if available @@ -940,7 +1141,7 @@ Available meta labels: * `__meta_dockerswarm_network_name`: the name of the network * `__meta_dockerswarm_network_ingress`: whether the network is ingress * `__meta_dockerswarm_network_internal`: whether the network is internal -* `__meta_dockerswarm_network_label_`: each label of the network +* `__meta_dockerswarm_network_label_`: each label of the network, with any unsupported characters converted to an underscore * `__meta_dockerswarm_network_scope`: the scope of the network #### `tasks` @@ -952,7 +1153,7 @@ created using the `port` parameter defined in the SD configuration. Available meta labels: -* `__meta_dockerswarm_container_label_`: each label of the container +* `__meta_dockerswarm_container_label_`: each label of the container, with any unsupported characters converted to an underscore * `__meta_dockerswarm_task_id`: the id of the task * `__meta_dockerswarm_task_container_id`: the container id of the task * `__meta_dockerswarm_task_desired_state`: the desired state of the task @@ -962,19 +1163,19 @@ Available meta labels: * `__meta_dockerswarm_service_id`: the id of the service * `__meta_dockerswarm_service_name`: the name of the service * `__meta_dockerswarm_service_mode`: the mode of the service -* `__meta_dockerswarm_service_label_`: each label of the service +* `__meta_dockerswarm_service_label_`: each label of the service, with any unsupported characters converted to an underscore * `__meta_dockerswarm_network_id`: the ID of the network * `__meta_dockerswarm_network_name`: the name of the network * `__meta_dockerswarm_network_ingress`: whether the network is ingress * `__meta_dockerswarm_network_internal`: whether the network is internal -* `__meta_dockerswarm_network_label_`: each label of the network -* `__meta_dockerswarm_network_label`: each label of the network +* `__meta_dockerswarm_network_label_`: each label of the network, with any unsupported characters converted to an underscore +* `__meta_dockerswarm_network_label`: each label of the network, with any unsupported characters converted to an underscore * `__meta_dockerswarm_network_scope`: the scope of the network * `__meta_dockerswarm_node_id`: the ID of the node * `__meta_dockerswarm_node_hostname`: the hostname of the node * `__meta_dockerswarm_node_address`: the address of the node * `__meta_dockerswarm_node_availability`: the availability of the node -* `__meta_dockerswarm_node_label_`: each label of the node +* `__meta_dockerswarm_node_label_`: each label of the node, with any unsupported characters converted to an underscore * `__meta_dockerswarm_node_platform_architecture`: the architecture of the node * `__meta_dockerswarm_node_platform_os`: the 
operating system of the node * `__meta_dockerswarm_node_role`: the role of the node @@ -994,7 +1195,7 @@ Available meta labels: * `__meta_dockerswarm_node_engine_version`: the version of the node engine * `__meta_dockerswarm_node_hostname`: the hostname of the node * `__meta_dockerswarm_node_id`: the ID of the node -* `__meta_dockerswarm_node_label_`: each label of the node +* `__meta_dockerswarm_node_label_`: each label of the node, with any unsupported characters converted to an underscore * `__meta_dockerswarm_node_manager_address`: the address of the manager component of the node * `__meta_dockerswarm_node_manager_leader`: the leadership status of the manager component of the node (true or false) * `__meta_dockerswarm_node_manager_reachability`: the reachability of the manager component of the node @@ -1021,6 +1222,18 @@ host: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -1048,11 +1261,13 @@ role: # Authentication information used to authenticate to the Docker daemon. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1094,7 +1309,7 @@ A DNS-based service discovery configuration allows specifying a set of DNS domain names which are periodically queried to discover a list of targets. The DNS servers to be contacted are read from `/etc/resolv.conf`. -This service discovery method only supports basic DNS A, AAAA, MX and SRV +This service discovery method only supports basic DNS A, AAAA, MX, NS and SRV record queries, but not the advanced DNS-SD approach specified in [RFC6763](https://tools.ietf.org/html/rfc6763). @@ -1104,13 +1319,14 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_dns_srv_record_target`: the target field of the SRV record * `__meta_dns_srv_record_port`: the port field of the SRV record * `__meta_dns_mx_record_target`: the target field of the MX record +* `__meta_dns_ns_record_target`: the target field of the NS record ```yaml # A list of DNS domain names to be queried. names: [ - ] -# The type of DNS query to perform. One of SRV, A, AAAA or MX. +# The type of DNS query to perform. One of SRV, A, AAAA, MX or NS. [ type: | default = 'SRV' ] # The port number used if the query type is not SRV. @@ -1144,6 +1360,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present * `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance * `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise +* `__meta_ec2_primary_ipv6_addresses`: comma separated list of the Primary IPv6 addresses of the instance, if present. The list is ordered based on the position of each corresponding network interface in the attachment order. 
* `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available * `__meta_ec2_private_dns_name`: the private DNS name of the instance, if available * `__meta_ec2_private_ip`: the private IP address of the instance, if present @@ -1193,11 +1410,13 @@ filters: # Authentication information used to authenticate to the EC2 API. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information, currently not supported by AWS. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1209,7 +1428,7 @@ authorization: # `credentials_file`. [ credentials: ] # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. + # It is mutually exclusive with `credentials`. [ credentials_file: ] # Optional OAuth 2.0 configuration, currently not supported by AWS. @@ -1228,6 +1447,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1274,7 +1505,7 @@ interface. The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_openstack_address_pool`: the pool of the private IP. -* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. +* `__meta_openstack_instance_flavor`: the flavor name of the OpenStack instance, or the flavor ID if the flavor name isn't available. * `__meta_openstack_instance_id`: the OpenStack instance ID. * `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using. * `__meta_openstack_instance_name`: the OpenStack instance name. @@ -1282,7 +1513,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_private_ip`: the private IP of the OpenStack instance. * `__meta_openstack_project_id`: the project (tenant) owning this instance. * `__meta_openstack_public_ip`: the public IP of the OpenStack instance. -* `__meta_openstack_tag_`: each tag value of the instance. +* `__meta_openstack_tag_`: each metadata item of the instance, with any unsupported characters converted to an underscore. * `__meta_openstack_user_id`: the user account owning the tenant. 
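For illustration, a minimal sketch (hypothetical job name, region, and metadata key) that uses one of these meta labels to keep only instances that opt in to scraping:

```yaml
scrape_configs:
  - job_name: openstack-example      # hypothetical job name
    openstack_sd_configs:
      - role: instance
        region: RegionOne            # hypothetical region
    relabel_configs:
      # Instance metadata items surface as __meta_openstack_tag_<key>.
      - source_labels: [__meta_openstack_tag_prometheus_scrape]
        regex: "true"
        action: keep
```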
See below for the configuration options for OpenStack discovery: @@ -1392,6 +1623,7 @@ For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud * `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server * `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server * `__meta_ovhcloud_dedicated_server_name`: the name of the server +* `__meta_ovhcloud_dedicated_server_no_intervention`: whether datacenter intervention is disabled for the server * `__meta_ovhcloud_dedicated_server_os`: the operating system of the server * `__meta_ovhcloud_dedicated_server_rack`: the rack of the server * `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server @@ -1473,6 +1705,7 @@ tls_config: # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1504,6 +1737,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1522,7 +1767,16 @@ and serves as an interface to plug in custom service discovery mechanisms. It reads a set of files containing a list of zero or more ``s. Changes to all defined files are detected via disk watches -and applied immediately. Files may be provided in YAML or JSON format. Only +and applied immediately. + +While those individual files are watched for changes, +the parent directory is also watched implicitly. This is to handle [atomic +renaming](https://github.com/fsnotify/fsnotify/blob/c1467c02fba575afdb5f4201072ab8403bbf00f4/README.md?plain=1#L128) efficiently and to detect new files that match the configured globs. +This may cause issues if the parent directory contains a large number of other files, +as each of these files will be watched too, even though the events related +to them are not relevant. + +Files may be provided in YAML or JSON format. Only changes resulting in well-formed target groups are applied. 
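As a minimal sketch (hypothetical paths), keeping target files in a dedicated directory avoids the implicit parent-directory watch picking up unrelated files:

```yaml
scrape_configs:
  - job_name: file-sd-example        # hypothetical job name
    file_sd_configs:
      # A dedicated directory means the implicit watch on the parent
      # directory only ever sees relevant files.
      - files:
          - /etc/prometheus/targets/*.yml
        refresh_interval: 5m
```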
Files must contain a list of static configs, using these formats: @@ -1583,7 +1837,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_gce_instance_id`: the numeric id of the instance * `__meta_gce_instance_name`: the name of the instance -* `__meta_gce_label_`: each GCE label of the instance +* `__meta_gce_label_`: each GCE label of the instance, with any unsupported characters converted to an underscore * `__meta_gce_machine_type`: full or partial URL of the machine type of the instance * `__meta_gce_metadata_`: each metadata item of the instance * `__meta_gce_network`: the network URL of the instance @@ -1667,8 +1921,8 @@ The labels below are only available for targets with `role` set to `hcloud`: * `__meta_hetzner_hcloud_memory_size_gb`: the amount of memory of the server (in GB) * `__meta_hetzner_hcloud_disk_size_gb`: the disk size of the server (in GB) * `__meta_hetzner_hcloud_private_ipv4_`: the private ipv4 address of the server within a given network -* `__meta_hetzner_hcloud_label_`: each label of the server -* `__meta_hetzner_hcloud_labelpresent_`: `true` for each label of the server +* `__meta_hetzner_hcloud_label_`: each label of the server, with any unsupported characters converted to an underscore +* `__meta_hetzner_hcloud_labelpresent_`: `true` for each label of the server, with any unsupported characters converted to an underscore The labels below are only available for targets with `role` set to `robot`: @@ -1683,12 +1937,14 @@ role: # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information, required when role is robot # Role hcloud does not support basic auth. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1721,6 +1977,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1780,11 +2048,13 @@ url: # Authentication information used to authenticate to the API server. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1815,6 +2085,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1860,12 +2142,14 @@ datacenter_id: # Authentication information used to authenticate to the API server. 
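# A minimal sketch (hypothetical credential files) of basic auth for the
# robot role:
#   basic_auth:
#     username_file: /etc/prometheus/hetzner_webservice_user
#     password_file: /etc/prometheus/hetzner_webservice_password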
# Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information, required when using IONOS # Cloud username and password as authentication method. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1898,6 +2182,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1935,8 +2231,8 @@ Available meta labels: * `__meta_kubernetes_node_name`: The name of the node object. * `__meta_kubernetes_node_provider_id`: The cloud provider's name for the node object. -* `__meta_kubernetes_node_label_`: Each label from the node object. -* `__meta_kubernetes_node_labelpresent_`: `true` for each label from the node object. +* `__meta_kubernetes_node_label_`: Each label from the node object, with any unsupported characters converted to an underscore. +* `__meta_kubernetes_node_labelpresent_`: `true` for each label from the node object, with any unsupported characters converted to an underscore. * `__meta_kubernetes_node_annotation_`: Each annotation from the node object. * `__meta_kubernetes_node_annotationpresent_`: `true` for each annotation from the node object. * `__meta_kubernetes_node_address_`: The first address for each node address type, if it exists. @@ -1959,8 +2255,8 @@ Available meta labels: * `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the service. (Does not apply to services of type ExternalName) * `__meta_kubernetes_service_loadbalancer_ip`: The IP address of the loadbalancer. (Applies to services of type LoadBalancer) * `__meta_kubernetes_service_external_name`: The DNS name of the service. (Applies to services of type ExternalName) -* `__meta_kubernetes_service_label_`: Each label from the service object. -* `__meta_kubernetes_service_labelpresent_`: `true` for each label of the service object. +* `__meta_kubernetes_service_label_`: Each label from the service object, with any unsupported characters converted to an underscore. +* `__meta_kubernetes_service_labelpresent_`: `true` for each label of the service object, with any unsupported characters converted to an underscore. * `__meta_kubernetes_service_name`: The name of the service object. * `__meta_kubernetes_service_port_name`: Name of the service port for the target. * `__meta_kubernetes_service_port_number`: Number of the service port for the target. @@ -1978,8 +2274,8 @@ Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the pod object. * `__meta_kubernetes_pod_name`: The name of the pod object. * `__meta_kubernetes_pod_ip`: The pod IP of the pod object. -* `__meta_kubernetes_pod_label_`: Each label from the pod object. -* `__meta_kubernetes_pod_labelpresent_`: `true` for each label from the pod object. +* `__meta_kubernetes_pod_label_`: Each label from the pod object, with any unsupported characters converted to an underscore. 
+* `__meta_kubernetes_pod_labelpresent_`: `true` for each label from the pod object, with any unsupported characters converted to an underscore. * `__meta_kubernetes_pod_annotation_`: Each annotation from the pod object. * `__meta_kubernetes_pod_annotationpresent_`: `true` for each annotation from the pod object. * `__meta_kubernetes_pod_container_init`: `true` if the container is an [InitContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) @@ -2008,8 +2304,8 @@ Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. * `__meta_kubernetes_endpoints_name`: The names of the endpoints object. -* `__meta_kubernetes_endpoints_label_`: Each label from the endpoints object. -* `__meta_kubernetes_endpoints_labelpresent_`: `true` for each label from the endpoints object. +* `__meta_kubernetes_endpoints_label_`: Each label from the endpoints object, with any unsupported characters converted to an underscore. +* `__meta_kubernetes_endpoints_labelpresent_`: `true` for each label from the endpoints object, with any unsupported characters converted to an underscore. * `__meta_kubernetes_endpoints_annotation_`: Each annotation from the endpoints object. * `__meta_kubernetes_endpoints_annotationpresent_`: `true` for each annotation from the endpoints object. * For all targets discovered directly from the endpoints list (those not additionally inferred @@ -2030,12 +2326,14 @@ The `endpointslice` role discovers targets from existing endpointslices. For eac address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well. +The role requires the `discovery.k8s.io/v1` API version (available since Kubernetes v1.21). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. * `__meta_kubernetes_endpointslice_name`: The name of endpointslice object. -* `__meta_kubernetes_endpointslice_label_`: Each label from the endpointslice object. -* `__meta_kubernetes_endpointslice_labelpresent_`: `true` for each label from the endpointslice object. +* `__meta_kubernetes_endpointslice_label_`: Each label from the endpointslice object, with any unsupported characters converted to an underscore. +* `__meta_kubernetes_endpointslice_labelpresent_`: `true` for each label from the endpointslice object, with any unsupported characters converted to an underscore. * `__meta_kubernetes_endpointslice_annotation_`: Each annotation from the endpointslice object. * `__meta_kubernetes_endpointslice_annotationpresent_`: `true` for each annotation from the endpointslice object. * For all targets discovered directly from the endpointslice list (those not additionally inferred @@ -2043,11 +2341,14 @@ Available meta labels: * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object. * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object. * `__meta_kubernetes_endpointslice_address_type`: The ip protocol family of the address of the target. - * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state. - * `__meta_kubernetes_endpointslice_endpoint_conditions_serving`: Set to `true` or `false` for the referenced endpoint's serving state. 
- * `__meta_kubernetes_endpointslice_endpoint_conditions_terminating`: Set to `true` or `false` for the referenced endpoint's terminating state. - * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint. + * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state. + * `__meta_kubernetes_endpointslice_endpoint_conditions_serving`: Set to `true` or `false` for the referenced endpoint's serving state. + * `__meta_kubernetes_endpointslice_endpoint_conditions_terminating`: Set to `true` or `false` for the referenced endpoint's terminating state. + * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint. * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation. + * `__meta_kubernetes_endpointslice_endpoint_hostname`: Hostname of the referenced endpoint. + * `__meta_kubernetes_endpointslice_endpoint_node_name`: Name of the Node hosting the referenced endpoint. + * `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in. * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint. @@ -2060,12 +2361,14 @@ The `ingress` role discovers a target for each path of each ingress. This is generally useful for blackbox monitoring of an ingress. The address will be set to the host specified in the ingress spec. +The role requires the `networking.k8s.io/v1` API version (available since Kubernetes v1.19). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the ingress object. * `__meta_kubernetes_ingress_name`: The name of the ingress object. -* `__meta_kubernetes_ingress_label_`: Each label from the ingress object. -* `__meta_kubernetes_ingress_labelpresent_`: `true` for each label from the ingress object. +* `__meta_kubernetes_ingress_label_`: Each label from the ingress object, with any unsupported characters converted to an underscore. +* `__meta_kubernetes_ingress_labelpresent_`: `true` for each label from the ingress object, with any unsupported characters converted to an underscore. * `__meta_kubernetes_ingress_annotation_`: Each annotation from the ingress object. * `__meta_kubernetes_ingress_annotationpresent_`: `true` for each annotation from the ingress object. * `__meta_kubernetes_ingress_class_name`: Class name from ingress spec, if present. @@ -2093,11 +2396,13 @@ role: # Optional authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2129,6 +2434,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. 
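+    # For example (hypothetical path): files: [ "/run/secrets/tenant_id" ]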
+ [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2197,6 +2514,11 @@ See below for the configuration options for Kuma MonitoringAssignment discovery: # Address of the Kuma Control Plane's MADS xDS server. server: +# Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend. +# This is useful when migrating between multiple Prometheus backends, or having separate backend for each Mesh. +# When not specified, system hostname/fqdn will be used if available, if not `prometheus` will be used. +[ client_id: ] + # The time to wait between polling update requests. [ refresh_interval: | default = 30s ] @@ -2215,6 +2537,18 @@ server: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -2222,11 +2556,13 @@ tls_config: # Authentication information used to authenticate to the Docker daemon. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2307,11 +2643,13 @@ See below for the configuration options for Lightsail discovery: # Authentication information used to authenticate to the Lightsail API. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information, currently not supported by AWS. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2323,7 +2661,7 @@ authorization: # `credentials_file`. [ credentials: ] # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. + # It is mutually exclusive with `credentials`. [ credentials_file: ] # Optional OAuth 2.0 configuration, currently not supported by AWS. @@ -2342,6 +2680,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. 
[ follow_redirects: | default = true ] @@ -2369,11 +2719,15 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_linode_private_ipv4`: the private IPv4 of the linode instance * `__meta_linode_public_ipv4`: the public IPv4 of the linode instance * `__meta_linode_public_ipv6`: the public IPv6 of the linode instance +* `__meta_linode_private_ipv4_rdns`: the reverse DNS for the first private IPv4 of the linode instance +* `__meta_linode_public_ipv4_rdns`: the reverse DNS for the first public IPv4 of the linode instance +* `__meta_linode_public_ipv6_rdns`: the reverse DNS for the first public IPv6 of the linode instance * `__meta_linode_region`: the region of the linode instance * `__meta_linode_type`: the type of the linode instance * `__meta_linode_status`: the status of the linode instance * `__meta_linode_tags`: a list of tags of the linode instance joined by the tag separator * `__meta_linode_group`: the display group a linode instance is a member of +* `__meta_linode_gpus`: the number of GPU's of the linode instance * `__meta_linode_hypervisor`: the virtualization software powering the linode instance * `__meta_linode_backups`: the backup service status of the linode instance * `__meta_linode_specs_disk_bytes`: the amount of storage space the linode instance has access to @@ -2381,17 +2735,20 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_linode_specs_vcpus`: the number of VCPUS this linode has access to * `__meta_linode_specs_transfer_bytes`: the amount of network transfer the linode instance is allotted each month * `__meta_linode_extra_ips`: a list of all extra IPv4 addresses assigned to the linode instance joined by the tag separator +* `__meta_linode_ipv6_ranges`: a list of IPv6 ranges with mask assigned to the linode instance joined by the tag separator ```yaml # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Note: Linode APIv4 Token must be created with scopes: 'linodes:read_only', 'ips:read_only', and 'events:read_only' # Optional HTTP basic authentication information, not currently supported by Linode APIv4. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2411,6 +2768,9 @@ authorization: oauth2: [ ] +# Optional region to filter on. +[ region: ] + # Optional proxy URL. [ proxy_url: ] # Comma-separated string that can contain IPs, CIDR notation, domain names @@ -2423,6 +2783,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. 
[ follow_redirects: | default = true ] @@ -2455,9 +2827,9 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_marathon_app`: the name of the app (with slashes replaced by dashes) * `__meta_marathon_image`: the name of the Docker image used (if available) * `__meta_marathon_task`: the ID of the Mesos task -* `__meta_marathon_app_label_`: any Marathon labels attached to the app -* `__meta_marathon_port_definition_label_`: the port definition labels -* `__meta_marathon_port_mapping_label_`: the port mapping labels +* `__meta_marathon_app_label_`: any Marathon labels attached to the app, with any unsupported characters converted to an underscore +* `__meta_marathon_port_definition_label_`: the port definition labels, with any unsupported characters converted to an underscore +* `__meta_marathon_port_mapping_label_`: the port mapping labels, with any unsupported characters converted to an underscore * `__meta_marathon_port_index`: the port index number (e.g. `1` for `PORT1`) See below for the configuration options for Marathon discovery: @@ -2484,9 +2856,11 @@ servers: # Sets the `Authorization` header on every request with the # configured username and password. # This is mutually exclusive with other authentication mechanisms. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2530,6 +2904,18 @@ tls_config: # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] ``` By default every app listed in Marathon will be scraped by Prometheus. If not all @@ -2594,11 +2980,13 @@ The following meta labels are available on targets during [relabeling](#relabel_ # Authentication information used to authenticate to the nomad server. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2629,6 +3017,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2777,9 +3177,11 @@ server: # Sets the `Authorization` header on every request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2815,6 +3217,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. 
+# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2861,9 +3275,10 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_scaleway_instance_type`: commercial type of the server * `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction)) -This role uses the private IPv4 address by default. This can be +This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be changed with relabeling, as demonstrated in [the Prometheus scaleway-sd configuration file](/documentation/examples/prometheus-scaleway.yml). +Should an instance have no address before relabeling, it will not be added to the target list and you will not be able to relabel it. #### Baremetal role @@ -2940,6 +3355,18 @@ tags_filter: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -2985,8 +3412,9 @@ password: # Optional HTTP basic authentication information, currently not supported by Uyuni. basic_auth: [ username: ] - [ password: ] - [ password_file: ] + [ username_file: ] + [ password: ] + [ password_file: ] # Optional `Authorization` header configuration, currently not supported by Uyuni. authorization: @@ -3016,6 +3444,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3062,11 +3502,13 @@ The following meta labels are available on targets during [relabeling](#relabel_ # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information, not currently supported by Vultr. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -3098,6 +3540,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. 
[ follow_redirects: | default = true ] @@ -3143,12 +3597,16 @@ Initially, aside from the configured per-target labels, a target's `job` label is set to the `job_name` value of the respective scrape configuration. The `__address__` label is set to the `:` address of the target. After relabeling, the `instance` label is set to the value of `__address__` by default if -it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels -are set to the scheme and metrics path of the target respectively. The `__param_` -label is set to the value of the first passed URL parameter called ``. +it was not set during relabeling. + +The `__scheme__` and `__metrics_path__` labels +are set to the scheme and metrics path of the target respectively, as specified in `scrape_config`. + +The `__param_` +label is set to the value of the first passed URL parameter called ``, as defined in `scrape_config`. The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's -interval and timeout. This is **experimental** and could change in the future. +interval and timeout, as specified in `scrape_config`. Additional labels prefixed with `__meta_` may be available during the relabeling phase. They are set by the service discovery mechanism that provided @@ -3262,9 +3720,11 @@ through the `__alerts_path__` label. # Sets the `Authorization` header on every request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -3279,6 +3739,25 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optionally configures AWS's Signature Verification 4 signing process to sign requests. +# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam. +# To use the default credentials from the AWS SDK, use `sigv4: {}`. +sigv4: + # The AWS region. If blank, the region from the default credentials chain + # is used. + [ region: ] + + # The AWS API keys. If blank, the environment variables `AWS_ACCESS_KEY_ID` + # and `AWS_SECRET_ACCESS_KEY` are used. + [ access_key: ] + [ secret_key: ] + + # Named AWS profile used to authenticate. + [ profile: ] + + # AWS Role ARN, an alternative to using AWS API keys. + [ role_arn: ] + # Optional OAuth 2.0 configuration. # Cannot be used at the same time as basic_auth or authorization. oauth2: @@ -3300,6 +3779,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3421,6 +3912,10 @@ static_configs: # List of Alertmanager relabel configurations. relabel_configs: [ - ... ] + +# List of alert relabel configurations. +alert_relabel_configs: + [ - ... ] ``` ### `` @@ -3436,6 +3931,17 @@ this functionality. # The URL of the endpoint to send samples to. url: +# protobuf message to use when writing to the remote write endpoint. +# +# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which +# will be deprecated eventually. 
+# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former, +# by improving efficiency and sending metadata, created timestamp and native histograms by default. +# +# Before changing this value, consult with your remote storage provider (or test) what message it supports. +# Read more on https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request +[ protobuf_message: | default = prometheus.WriteRequest ] + # Timeout for requests to the remote write endpoint. [ remote_timeout: | default = 30s ] @@ -3457,20 +3963,23 @@ write_relabel_configs: [ send_exemplars: | default = false ] # Enables sending of native histograms, also known as sparse histograms, over remote write. +# For the `io.prometheus.write.v2.Request` message, this option is noop (always true). [ send_native_histograms: | default = false ] # Sets the `Authorization` header on every remote write request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] # Optional `Authorization` header configuration. authorization: # Sets the authentication type. - [ type: | default: Bearer ] + [ type: | default = Bearer ] # Sets the credentials. It is mutually exclusive with # `credentials_file`. [ credentials: ] @@ -3498,19 +4007,38 @@ sigv4: [ role_arn: ] # Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread. +# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam. oauth2: [ ] # Optional AzureAD configuration. -# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4. +# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam. azuread: # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. [ cloud: | default = AzurePublic ] # Azure User-assigned Managed identity. [ managed_identity: - [ client_id: ] + [ client_id: ] ] + + # Azure OAuth. + [ oauth: + [ client_id: ] + [ client_secret: ] + [ tenant_id: ] ] + + # Azure SDK auth. + # See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication + [ sdk: + [ tenant_id: ] ] + +# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use. +# Optional Google Cloud Monitoring configuration. +# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread. +# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`. +google_iam: + # Service account key with monitoring write permissions. + credentials_file: # Configures the remote write request's TLS settings. tls_config: @@ -3523,16 +4051,28 @@ tls_config: # contain port numbers. [ no_proxy: ] # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] +[ proxy_from_environment: | default = false ] # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. 
+ [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default = true ] # Configures the queue used to write to remote storage. queue_config: @@ -3540,14 +4080,15 @@ queue_config: # samples from the WAL. It is recommended to have enough capacity in each # shard to buffer several requests to keep throughput up while processing # occasional slow remote requests. - [ capacity: | default = 2500 ] + [ capacity: | default = 10000 ] # Maximum number of shards, i.e. amount of concurrency. - [ max_shards: | default = 200 ] + [ max_shards: | default = 50 ] # Minimum number of shards, i.e. amount of concurrency. [ min_shards: | default = 1 ] # Maximum number of samples per send. - [ max_samples_per_send: | default = 500] - # Maximum time a sample will wait in buffer. + [ max_samples_per_send: | default = 2000] + # Maximum time a sample will wait for a send. The sample might wait less + # if the buffer is full. Further time might pass due to potential retries. [ batch_send_deadline: | default = 5s ] # Initial retry delay. Gets doubled for every retry. [ min_backoff: | default = 30ms ] @@ -3556,8 +4097,15 @@ queue_config: # Retry upon receiving a 429 status code from the remote-write storage. # This is experimental and might change in the future. [ retry_on_http_429: | default = false ] - -# Configures the sending of series metadata to remote storage. + # If set, any sample that is older than sample_age_limit + # will not be sent to the remote storage. The default value is 0s, + # which means that all samples are sent. + [ sample_age_limit: | default = 0s ] + +# Configures the sending of series metadata to remote storage +# if the `prometheus.WriteRequest` message was chosen. When +# `io.prometheus.write.v2.Request` is used, metadata is always sent. +# # Metadata configuration is subject to change at any point # or be removed in future releases. metadata_config: @@ -3603,9 +4151,11 @@ headers: # Sets the `Authorization` header on every remote read request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -3641,6 +4191,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3671,6 +4233,10 @@ NOTE: Out-of-order ingestion is an experimental feature, but you do not need any # into the TSDB, i.e. it is an in-order sample or an out-of-order/out-of-bounds sample # that is within the out-of-order window, or (b) too-old, i.e. not in-order # and before the out-of-order window. +# +# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows +# the agent's WAL to accept out-of-order samples that fall within the specified time window relative +# to the timestamp of the last appended sample for the same series. 
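+# For example (hypothetical value): with out_of_order_time_window set to 30m,
+# a sample up to 30 minutes older than the newest sample of its series is
+# still ingested rather than rejected.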
[ out_of_order_time_window: <duration> | default = 0s ]
```
diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md
index eda0214b35..9aa226bbc0 100644
--- a/docs/configuration/recording_rules.md
+++ b/docs/configuration/recording_rules.md
@@ -86,6 +86,9 @@ name:
# rule can produce. 0 is no limit.
[ limit: <int> | default = 0 ]

+# Offset the rule evaluation timestamp of this particular group by the specified duration into the past.
+[ query_offset: <duration> | default = global.rule_query_offset ]
+
rules:
  [ - <rule> ... ]
```
@@ -147,3 +150,10 @@ by the rule are discarded, and if it's an alerting rule, _all_ alerts for the
rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are
written.
+
+# Rule query offset
+Setting a query offset is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping or short evaluation intervals.
+
+# Failed rule evaluations due to slow evaluation
+
+If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.
diff --git a/docs/configuration/template_reference.md b/docs/configuration/template_reference.md
index 12effae32f..47df9d1e09 100644
--- a/docs/configuration/template_reference.md
+++ b/docs/configuration/template_reference.md
@@ -18,7 +18,7 @@ The primary data structure for dealing with time series data is the sample, defi
```go
type sample struct {
  Labels map[string]string
-  Value float64
+  Value interface{}
}
```
@@ -44,7 +44,7 @@ If functions are used in a pipeline, the pipeline value is passed as the last ar
| query | query string | []sample | Queries the database, does not support returning range vectors. |
| first | []sample | sample | Equivalent to `index a 0` |
| label | label, sample | string | Equivalent to `index sample.Labels label` |
-| value | sample | float64 | Equivalent to `sample.Value` |
+| value | sample | interface{} | Equivalent to `sample.Value` |
| sortByLabel | label, []samples | []sample | Sorts the samples by the given label. Is stable. |

`first`, `label` and `value` are intended to make query results easily usable in pipelines.
@@ -86,6 +86,7 @@ versions.
| args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. |
| tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. |
| safeHtml | string | string | Marks string as HTML not requiring auto-escaping. |
+| externalURL | _none_ | string | The URL under which Prometheus is externally reachable. |
| pathPrefix | _none_ | string | The external URL [path](https://pkg.go.dev/net/url#URL) for use in console templates. |

## Template type differences
diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md
index efd168b358..7fc676a251 100644
--- a/docs/configuration/unit_testing_rules.md
+++ b/docs/configuration/unit_testing_rules.md
@@ -39,7 +39,7 @@ tests:
``` yaml
# Series data
-interval: <duration>
+[ interval: <duration> | default = evaluation_interval ]
input_series:
  [ - <series> ]
@@ -76,18 +76,51 @@ series:
# This uses expanding notation.
# Expanding notation:
-# 'a+bxc' becomes 'a a+b a+(2*b) a+(3*b) … a+(c*b)'
-# Read this as series starts at a, then c further samples incrementing by b.
-# 'a-bxc' becomes 'a a-b a-(2*b) a-(3*b) … a-(c*b)'
-# Read this as series starts at a, then c further samples decrementing by b (or incrementing by negative b).
+#    'a+bxn' becomes 'a a+b a+(2*b) a+(3*b) … a+(n*b)'
+#    Read this as series starts at a, then n further samples incrementing by b.
+#    'a-bxn' becomes 'a a-b a-(2*b) a-(3*b) … a-(n*b)'
+#    Read this as series starts at a, then n further samples decrementing by b (or incrementing by negative b).
+#    'axn' becomes 'a a a … a' (a repeated n+1 times) - it's a shorthand for 'a+0xn'
# There are special values to indicate missing and stale samples:
-# '_' represents a missing sample from scrape
-# 'stale' indicates a stale sample
+#    '_' represents a missing sample from scrape
+#    'stale' indicates a stale sample
# Examples:
# 1. '-2+4x3' becomes '-2 2 6 10' - series starts at -2, then 3 further samples incrementing by 4.
# 2. ' 1-2x4' becomes '1 -1 -3 -5 -7' - series starts at 1, then 4 further samples decrementing by 2.
# 3. ' 1x4' becomes '1 1 1 1 1' - shorthand for '1+0x4', series starts at 1, then 4 further samples incrementing by 0.
# 4. ' 1 _x3 stale' becomes '1 _ _ _ stale' - the missing sample cannot increment, so 3 missing samples are produced by the '_x3' expression.
+#
+# Native histogram notation:
+# Native histograms can be used instead of floating point numbers using the following notation:
+#     {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}
+# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
+# All properties are optional and default to 0. The order is not important. The following properties are supported:
+# - schema (int):
+#     Currently valid schema numbers are -4 <= n <= 8. They are all for
+#     base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+#     then each power of two is divided into 2^n logarithmic buckets. Or
+#     in other words, each bucket boundary is the previous boundary times
+#     2^(2^-n).
+# - sum (float):
+#     The sum of all observations, including the zero bucket.
+# - count (non-negative float):
+#     The number of observations, including those that are NaN and including the zero bucket.
+# - z_bucket (non-negative float):
+#     The sum of all observations in the zero bucket.
+# - z_bucket_w (non-negative float):
+#     The width of the zero bucket.
+#     If z_bucket_w > 0, the zero bucket contains all observations -z_bucket_w <= x <= z_bucket_w.
+#     Otherwise, the zero bucket only contains observations that are exactly 0.
+# - buckets (list of non-negative floats):
+#     Observation counts in positive buckets. Each represents an absolute count.
+# - offset (int):
+#     The starting index of the first entry in the positive buckets.
+# - n_buckets (list of non-negative floats):
+#     Observation counts in negative buckets. Each represents an absolute count.
+# - n_offset (int): +# The starting index of the first entry in the negative buckets. +# - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge') +# The counter reset hint associated with this histogram. Defaults to 'unknown' if not set. values: ``` diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 1cf54c47f8..a3e2c0b9e9 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -20,21 +20,13 @@ values according to the values of the current environment variables. References to undefined variables are replaced by the empty string. The `$` character can be escaped by using `$$`. -## Remote Write Receiver - -`--enable-feature=remote-write-receiver` - -The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview). - -Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus. - ## Exemplars storage `--enable-feature=exemplar-storage` [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces. -Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The config file block [storage](configuration/configuration.md#configuration-file)/[exemplars](configuration/configuration.md#exemplars) can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration). +Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The config file block [storage](configuration/configuration.md#configuration-file)/[exemplars](configuration/configuration.md#exemplars) can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `trace_id=` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration). ## Memory snapshot on shutdown @@ -55,30 +47,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`. - `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`. 
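As a hedged illustration of the semantics above: since scrapes rejected for exceeding `body_size_limit` report `-1`, a query along the following lines should list the targets whose most recent scrape was rejected as too large:

    scrape_body_size_bytes == -1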
-## New service discovery manager - -`--enable-feature=new-service-discovery-manager` - -When enabled, Prometheus uses a new service discovery manager that does not -restart unchanged discoveries upon reloading. This makes reloads faster and reduces -pressure on service discoveries' sources. - -Users are encouraged to test the new service discovery manager and report any -issues upstream. - -In future releases, this new service discovery manager will become the default and -this feature flag will be ignored. - -## Prometheus agent - -`--enable-feature=agent` - -When enabled, Prometheus runs in agent mode. The agent mode is limited to -discovery, scrape and remote write. - -This is useful when you do not need to query the Prometheus data locally, but -only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). - ## Per-step stats `--enable-feature=promql-per-step-stats` @@ -95,14 +63,13 @@ computed at all. When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. -## No default scrape port +## Auto GOMEMLIMIT -`--enable-feature=no-default-scrape-port` +`--enable-feature=auto-gomemlimit` -When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to -the address used to scrape a target (the value of the `__address_` label), contrary to the default behavior. -In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or -by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed. +When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used. + +There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process, for example the kernel page cache, which is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus. ## Native Histograms @@ -119,18 +86,144 @@ also experimental) protobuf parser, through which _all_ metrics are ingested (i.e. not only native histograms). Prometheus will try to negotiate the protobuf format first. The instrumented target needs to support the protobuf format, too, _and_ it needs to expose native histograms. The protobuf format -allows to expose conventional and native histograms side by side. With this -feature flag disabled, Prometheus will continue to parse the conventional -histogram (albeit via the text format). With this flag enabled, Prometheus will -still ingest those conventional histograms that do not come with a -corresponding native histogram. However, if a native histogram is present, -Prometheus will ignore the corresponding conventional histogram, with the -notable exception of exemplars, which are always ingested. +allows exposing classic and native histograms side by side. With this feature +flag disabled, Prometheus will continue to parse the classic histogram (albeit +via the text format). With this flag enabled, Prometheus will still ingest +those classic histograms that do not come with a corresponding native +histogram.
However, if a native histogram is present, Prometheus will ignore +the corresponding classic histogram, with the notable exception of exemplars, +which are always ingested. To keep the classic histograms as well, enable +`scrape_classic_histograms` in the scrape job. + +_Note about the format of `le` and `quantile` label values:_ + +In certain situations, the protobuf parsing changes the number formatting of +the `le` labels of classic histograms and the `quantile` labels of +summaries. Typically, this happens if the scraped target is instrumented with +[client_golang](https://github.com/prometheus/client_golang) provided that +[promhttp.HandlerOpts.EnableOpenMetrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus/promhttp#HandlerOpts) +is set to `false`. In such a case, integer label values are represented in the +text format as such, e.g. `quantile="1"` or `le="2"`. However, the protobuf parsing +changes the representation to float-like (following the OpenMetrics +specification), so the examples above become `quantile="1.0"` and `le="2.0"` after +ingestion into Prometheus, which changes the identity of the metric compared to +what was ingested before via the text format. + +The effect of this change is that alerts, recording rules and dashboards that +directly reference label values as whole numbers such as `le="1"` will stop +working. + +Aggregation by the `le` and `quantile` labels for vectors that contain the old and +new formatting will lead to unexpected results, and range vectors that span the +transition between the different formatting will contain additional series. +The most common use case for both is the quantile calculation via +`histogram_quantile`, e.g. +`histogram_quantile(0.95, sum by (le) (rate(histogram_bucket[10m])))`. +The `histogram_quantile` function already tries to mitigate the effects to some +extent, but there will be inaccuracies, in particular for shorter ranges that +cover only a few samples. + +Ways to deal with this change either globally or on a per metric basis: + +- Fix references to integer `le`, `quantile` label values, but otherwise do +nothing and accept that some queries that span the transition time will produce +inaccurate or unexpected results. +_This is the recommended solution, to get consistently normalized label values._ +Also Prometheus 3.0 is expected to enforce normalization of these label values. +- Use `metric_relabel_config` to retain the old labels when scraping targets. +This should **only** be applied to metrics that currently produce such labels. + + +```yaml + metric_relabel_configs: + - source_labels: + - quantile + target_label: quantile + regex: (\d+)\.0+ + - source_labels: + - le + - __name__ + target_label: le + regex: (\d+)\.0+;.*_bucket +``` + +## Experimental PromQL functions + +`--enable-feature=promql-experimental-functions` + +Enables PromQL functions that are considered experimental. These functions +might change their name, syntax, or semantics. They might also get removed +entirely. + +## Created Timestamps Zero Injection + +`--enable-feature=created-timestamp-zero-ingestion` + +Enables ingestion of created timestamp. Created timestamps are injected as 0 valued samples when appropriate. See [PromCon talk](https://youtu.be/nWf0BfQ5EEA) for details. + +Currently Prometheus supports created timestamps only on the traditional Prometheus Protobuf protocol (WIP for other protocols). 
As a result, when enabling this feature, the Prometheus protobuf scrape protocol will be prioritized (See `scrape_config.scrape_protocols` settings for more details). + +Besides enabling this feature in Prometheus, created timestamps need to be exposed by the application being scraped. + +## Concurrent evaluation of independent rules + +`--enable-feature=concurrent-rule-eval` + +By default, rule groups execute concurrently, but the rules within a group execute sequentially; this is because rules can use the +output of a preceding rule as its input. However, if there is no detectable relationship between rules then there is no +reason to run them sequentially. +When the `concurrent-rule-eval` feature flag is enabled, rules without any dependency on other rules within a rule group will be evaluated concurrently. +This has the potential to improve rule group evaluation latency and resource utilization at the expense of adding more concurrent query load. + +The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default. + +## Serve old Prometheus UI + +Fall back to serving the old (Prometheus 2.x) web UI instead of the new UI. The new UI that was released as part of Prometheus 3.0 is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. However, it is not fully feature complete and battle-tested yet, so some users may still prefer using the old UI. + +`--enable-feature=old-ui` + +## Metadata WAL Records + +`--enable-feature=metadata-wal-records` + +When enabled, Prometheus will store metadata in-memory and keep track of +metadata changes as WAL records on a per-series basis. + +This must be used if +you are also using remote write 2.0 as it will only gather metadata from the WAL. + +## Delay compaction start time + +`--enable-feature=delayed-compaction` + +A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources. + +Only auto Head compactions and the operations directly resulting from them are subject to this delay. + +In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay. + +Note that during this delay, the Head continues its usual operations, which include serving and appending series. + +Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place. + +## Delay __name__ label removal for PromQL engine + +`--enable-feature=promql-delayed-name-removal` + +When enabled, Prometheus will change the way in which the `__name__` label is removed from PromQL query results (for functions and expressions for which this is necessary). Specifically, it will delay the removal to the last step of the query evaluation, instead of every time an expression or function creating derived metrics is evaluated. + +This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label. + +## Auto Reload Config -## OTLP Receiver +`--enable-feature=auto-reload-config` -`--enable-feature=otlp-write-receiver` +When enabled, Prometheus will automatically reload its configuration file at a +specified interval. 
The interval is defined by the +`--config.auto-reload-interval` flag, which defaults to `30s`. -The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes. -Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features -won't work when you push OTLP metrics. \ No newline at end of file +Configuration reloads are triggered by detecting changes in the checksum of the +main configuration file or any referenced files, such as rule and scrape +configurations. To ensure consistency and avoid issues during reloads, it's +recommended to update these files atomically. diff --git a/docs/installation.md b/docs/installation.md index 28f64c0f95..c8e359e780 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -31,11 +31,19 @@ production deployments it is highly recommended to use a [named volume](https://docs.docker.com/storage/volumes/) to ease managing the data on Prometheus upgrades. -To provide your own configuration, there are several options. Here are -two examples. +### Setting command line parameters + +The Docker image is started with a number of default command line parameters, which +can be found in the [Dockerfile](https://github.com/prometheus/prometheus/blob/main/Dockerfile) (adjust the link to correspond with the version in use). + +If you want to add extra command line parameters to the `docker run` command, +you will need to re-add these yourself as they will be overwritten. ### Volumes & bind-mount +To provide your own configuration, there are several options. Here are +two examples. + Bind-mount your `prometheus.yml` from the host by running: ```bash diff --git a/docs/querying/api.md b/docs/querying/api.md index 8ddb834ef7..714438398b 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -25,8 +25,10 @@ Other non-`2xx` codes may be returned for errors occurring before the API endpoint is reached. An array of warnings may be returned if there are errors that do -not inhibit the request execution. All of the data that was successfully -collected will be returned in the data field. +not inhibit the request execution. An additional array of info-level +annotations may be returned for potential query issues that may or may +not be false positives. All of the data that was successfully collected +will be returned in the data field. The JSON response envelope format is as follows: @@ -40,9 +42,11 @@ The JSON response envelope format is as follows: "errorType": "", "error": "", - // Only if there were warnings while executing the request. + // Only set if there were warnings while executing the request. // There will still be data in the data field. - "warnings": [""] + "warnings": [""], + // Only set if there were info-level annotations while executing the request. + "infos": [""] } ``` @@ -235,6 +239,75 @@ $ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar' } ``` +## Parsing a PromQL expression into an abstract syntax tree (AST) + +This endpoint is **experimental** and might change in the future. It is currently only meant to be used by Prometheus' own web UI, and the endpoint name and exact format returned may change from one Prometheus version to another. It may also be removed again in case it is no longer needed by the UI.
+ +The following endpoint parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation: + +``` +GET /api/v1/parse_query +POST /api/v1/parse_query +``` + +URL query parameters: + +- `query=`: Prometheus expression query string. + +You can URL-encode these parameters directly in the request body by using the `POST` method and +`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large +query that may breach server-side URL character limits. + +The `data` section of the query result is a JSON object representing the AST of the parsed query expression. + +The following example parses the expression `foo/bar`: + +```json +$ curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar' +{ + "data" : { + "bool" : false, + "lhs" : { + "matchers" : [ + { + "name" : "__name__", + "type" : "=", + "value" : "foo" + } + ], + "name" : "foo", + "offset" : 0, + "startOrEnd" : null, + "timestamp" : null, + "type" : "vectorSelector" + }, + "matching" : { + "card" : "one-to-one", + "include" : [], + "labels" : [], + "on" : false + }, + "op" : "/", + "rhs" : { + "matchers" : [ + { + "name" : "__name__", + "type" : "=", + "value" : "bar" + } + ], + "name" : "bar", + "offset" : 0, + "startOrEnd" : null, + "timestamp" : null, + "type" : "vectorSelector" + }, + "type" : "binaryExpr" + }, + "status" : "success" +} +``` + ## Querying metadata Prometheus offers a set of API endpoints to query metadata about series and their labels. @@ -256,6 +329,7 @@ URL query parameters: series to return. At least one `match[]` argument must be provided. - `start=`: Start timestamp. - `end=`: End timestamp. +- `limit=`: Maximum number of returned series. Optional. 0 means disabled. You can URL-encode these parameters directly in the request body by using the `POST` method and `Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large @@ -306,6 +380,7 @@ URL query parameters: - `end=`: End timestamp. Optional. - `match[]=`: Repeated series selector argument that selects the series from which to read the label names. Optional. +- `limit=`: Maximum number of returned label names. Optional. 0 means disabled. The `data` section of the JSON response is a list of string label names. @@ -356,6 +431,7 @@ URL query parameters: - `end=`: End timestamp. Optional. - `match[]=`: Repeated series selector argument that selects the series from which to read the label values. Optional. +- `limit=`: Maximum number of returned label values. Optional. 0 means disabled. The `data` section of the JSON response is a list of string label values. @@ -404,7 +480,7 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr "exemplars": [ { "labels": { - "traceID": "EpTxMJ40fUus7aGY" + "trace_id": "EpTxMJ40fUus7aGY" }, "value": "6", "timestamp": 1600096945.479 @@ -421,14 +497,14 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr "exemplars": [ { "labels": { - "traceID": "Olp9XHlq763ccsfa" + "trace_id": "Olp9XHlq763ccsfa" }, "value": "19", "timestamp": 1600096955.479 }, { "labels": { - "traceID": "hCtjygkIHwAN9vs4" + "trace_id": "hCtjygkIHwAN9vs4" }, "value": "20", "timestamp": 1600096965.489 @@ -449,7 +525,7 @@ raw numbers. The keys `"histogram"` and `"histograms"` only show up if the experimental native histograms are present in the response. Their placeholder `` -is explained in detail in its own section below. +is explained in detail in its own section below.
### Range vectors @@ -467,9 +543,12 @@ Range vectors are returned as result type `matrix`. The corresponding ] ``` -Each series could have the `"values"` key, or the `"histograms"` key, or both. +Each series could have the `"values"` key, or the `"histograms"` key, or both. For a given timestamp, there will only be one sample of either float or histogram type. +Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort) +and [`sort_by_label`](functions.md#sort_by_label) have no effect for range vectors. + ### Instant vectors Instant vectors are returned as result type `vector`. The corresponding @@ -488,6 +567,10 @@ Instant vectors are returned as result type `vector`. The corresponding Each series could have the `"value"` key, or the `"histogram"` key, but not both. +Series are not guaranteed to be returned in any particular order unless a function +such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label) +is used. + ### Scalars Scalar results are returned as result type `scalar`. The corresponding @@ -543,6 +626,7 @@ GET /api/v1/targets ``` Both the active and dropped targets are part of the response by default. +Dropped targets are subject to the `keep_dropped_targets` limit, if set. `labels` represents the label set after relabeling has occurred. `discoveredLabels` represent the unmodified labels retrieved during service discovery before relabeling has occurred. @@ -678,6 +762,8 @@ URL query parameters: - `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. - `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done. - `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. +- `exclude_alerts=`: only return rules, do not return active alerts. +- `match[]=`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional. ```json $ curl http://localhost:9090/api/v1/rules @@ -1297,13 +1383,13 @@ endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview). ## OTLP Receiver -Prometheus can be configured as a receiver for the OTLP Metrics protocol. This +Prometheus can be configured as a receiver for the OTLP Metrics protocol. This is not considered an efficient way of ingesting samples. Use it with caution for specific low-volume use cases. It is not suitable for replacing the ingestion via scraping. -Enable the OTLP receiver by the feature flag -`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver +Enable the OTLP receiver by setting +`--web.enable-otlp-receiver`. When enabled, the OTLP receiver endpoint is `/api/v1/otlp/v1/metrics`.
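As a minimal sketch (the flag name is taken from the paragraph above; combine it with whatever other command line flags your deployment already uses):

```bash
./prometheus --web.enable-otlp-receiver
```

OTLP clients can then push metrics to the `/api/v1/otlp/v1/metrics` endpoint mentioned above.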
-*New in v2.47* \ No newline at end of file +*New in v2.47* diff --git a/docs/querying/basics.md b/docs/querying/basics.md index a4e17c5c60..66d7b8018d 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -8,13 +8,19 @@ sort_rank: 1 Prometheus provides a functional query language called PromQL (Prometheus Query Language) that lets the user select and aggregate time series data in real -time. The result of an expression can either be shown as a graph, viewed as -tabular data in Prometheus's expression browser, or consumed by external -systems via the [HTTP API](api.md). +time. + +When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time, +or a _range query_ at equally-spaced steps between a start and an end time. PromQL works exactly the same +in both cases; the range query is just like an instant query run multiple times at different timestamps. + +In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries. + +Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md). ## Examples -This document is meant as a reference. For learning, it might be easier to +This document is a Prometheus basic language reference. For learning, it may be easier to start with a couple of [examples](examples.md). ## Expression language data types @@ -28,9 +34,9 @@ evaluate to one of four types: * **String** - a simple string value; currently unused Depending on the use-case (e.g. when graphing vs. displaying the output of an -expression), only some of these types are legal as the result from a +expression), only some of these types are legal as the result of a user-specified expression. For example, an expression that returns an instant -vector is the only type that can be directly graphed. +vector is the only type which can be graphed. _Notes about the experimental native histograms:_ @@ -46,16 +52,15 @@ _Notes about the experimental native histograms:_ ### String literals -Strings may be specified as literals in single quotes, double quotes or -backticks. +String literals are designated by single quotes, double quotes or backticks. PromQL follows the same [escaping rules as -Go](https://golang.org/ref/spec#String_literals). In single or double quotes a +Go](https://golang.org/ref/spec#String_literals). For string literals in single or double quotes, a backslash begins an escape sequence, which may be followed by `a`, `b`, `f`, -`n`, `r`, `t`, `v` or `\`. Specific characters can be provided using octal -(`\nnn`) or hexadecimal (`\xnn`, `\unnnn` and `\Unnnnnnnn`). +`n`, `r`, `t`, `v` or `\`. Specific characters can be provided using octal +(`\nnn`) or hexadecimal (`\xnn`, `\unnnn` and `\Unnnnnnnn`) notations. -No escaping is processed inside backticks. Unlike Go, Prometheus does not discard newlines inside backticks. +Conversely, escape characters are not parsed in string literals designated by backticks. It is important to note that, unlike Go, Prometheus does not discard newlines inside backticks. Example: @@ -82,22 +87,42 @@ Examples: 0x8f -Inf NaN + + +As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change.
+ +Examples: + + 1s # Equivalent to 1.0 + 2m # Equivalent to 120.0 + 1ms # Equivalent to 0.001 + + +## Time series selectors -## Time series Selectors +These are the basic building-blocks that instruct PromQL what data to fetch. ### Instant vector selectors Instant vector selectors allow the selection of a set of time series and a -single sample value for each at a given timestamp (instant): in the simplest -form, only a metric name is specified. This results in an instant vector +single sample value for each at a given timestamp (point in time). In the simplest +form, only a metric name is specified, which results in an instant vector containing elements for all time series that have this metric name. +The value returned will be that of the most recent sample at or before the +query's evaluation timestamp (in the case of an +[instant query](api.md#instant-queries)) +or the current step within the query (in the case of a +[range query](api.md/#range-queries)). +The [`@` modifier](#modifier) allows overriding the timestamp relative to which +the selection takes place. Time series are only returned if their most recent sample is less than the [lookback period](#staleness) ago. + This example selects all time series that have the `http_requests_total` metric -name: +name, returning the most recent sample for each: http_requests_total -It is possible to filter these time series further by appending a comma separated list of label +It is possible to filter these time series further by appending a comma-separated list of label matchers in curly braces (`{}`). This example selects only those time series with the `http_requests_total` @@ -124,6 +149,33 @@ For example, this selects all `http_requests_total` time series for `staging`, Label matchers that match empty label values also select all time series that do not have the specific label set at all. It is possible to have multiple matchers for the same label name. +For example, given the dataset: + + http_requests_total + http_requests_total{replica="rep-a"} + http_requests_total{replica="rep-b"} + http_requests_total{environment="development"} + +The query `http_requests_total{environment=""}` would match and return: + + http_requests_total + http_requests_total{replica="rep-a"} + http_requests_total{replica="rep-b"} + +and would exclude: + + http_requests_total{environment="development"} + +Multiple matchers can be used for the same label name; they all must pass for a result to be returned. + +The query: + + http_requests_total{replica!="rep-a",replica=~"rep.*"} + +Would then match: + + http_requests_total{replica="rep-b"} + Vector selectors must either specify a name or at least one label matcher that does not match the empty string. The following expression is illegal: @@ -159,12 +211,12 @@ Range vector literals work like instant vector literals, except that they select a range of samples back from the current instant. Syntactically, a [time duration](#time-durations) is appended in square brackets (`[]`) at the end of a vector selector to specify how far back in time values should be fetched for -each resulting range vector element. The range is a closed interval, -i.e. samples with timestamps coinciding with either boundary of the range are -still included in the selection. +each resulting range vector element. The range is a left-open and right-closed interval, +i.e. 
samples with timestamps coinciding with the left boundary of the range are excluded from the selection, +while samples coinciding with the right boundary of the range are included in the selection. -In this example, we select all the values we have recorded within the last 5 -minutes for all time series that have the metric name `http_requests_total` and +In this example, we select all the values recorded less than 5m ago for all time series +that have the metric name `http_requests_total` and a `job` label set to `prometheus`: http_requests_total{job="prometheus"}[5m] @@ -178,11 +230,13 @@ following units: * `s` - seconds * `m` - minutes * `h` - hours -* `d` - days - assuming a day has always 24h -* `w` - weeks - assuming a week has always 7d -* `y` - years - assuming a year has always 365d +* `d` - days - assuming a day always has 24h +* `w` - weeks - assuming a week always has 7d +* `y` - years - assuming a year always has 365d<sup>1</sup> + +<sup>1</sup> For days in a year, the leap day is ignored, and conversely, for a minute, a leap second is ignored. -Time durations can be combined, by concatenation. Units must be ordered from the +Time durations can be combined by concatenation. Units must be ordered from the longest to the shortest. A given unit must only appear once in a time duration. Here are some examples of valid time durations: @@ -192,6 +246,15 @@ Here are some examples of valid time durations: 5m 10s + +As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change. + +Examples: + + 1.0 # Equivalent to 1s + 0.001 # Equivalent to 1ms + 120 # Equivalent to 2m + ### Offset modifier The `offset` modifier allows changing the time offset for individual @@ -212,13 +275,12 @@ While the following would be *incorrect*: sum(http_requests_total{method="GET"}) offset 5m // INVALID. -The same works for range vectors. This returns the 5-minute rate that -`http_requests_total` had a week ago: +The same works for range vectors. This returns the 5-minute [rate](./functions.md#rate) +that `http_requests_total` had a week ago: rate(http_requests_total[5m] offset 1w) -For comparisons with temporal shifts forward in time, a negative offset -can be specified: +When querying for samples in the past, a negative offset will enable temporal comparisons forward in time: rate(http_requests_total[5m] offset -1w) @@ -249,11 +311,11 @@ The same works for range vectors. This returns the 5-minute rate that rate(http_requests_total[5m] @ 1609746000) -The `@` modifier supports all representation of float literals described -above within the limits of `int64`. It can also be used along -with the `offset` modifier where the offset is applied relative to the `@` -modifier time irrespective of which modifier is written first. -These 2 queries will produce the same result. +The `@` modifier supports all representations of numeric literals described above. +It works with the `offset` modifier where the offset is applied relative to the `@` +modifier time. The results are the same irrespective of the order of the modifiers. + +For example, these two queries will produce the same result: # offset after @ http_requests_total @ 1609746000 offset 5m @@ -299,33 +361,36 @@ PromQL supports line comments that start with `#`.
Example: ### Staleness -When queries are run, timestamps at which to sample data are selected +The timestamps at which to sample data, during a query, are selected independently of the actual present time series data. This is mainly to support cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated -time series do not exactly align in time. Because of their independence, +time series do not precisely align in time. Because of their independence, Prometheus needs to assign a value at those timestamps for each relevant time -series. It does so by simply taking the newest sample before this timestamp. +series. It does so by taking the newest sample that is less than the lookback period ago. +The lookback period is 5 minutes by default, but can be +[set with the `--query.lookback-delta` flag](../command-line/prometheus.md) If a target scrape or rule evaluation no longer returns a sample for a time -series that was previously present, that time series will be marked as stale. -If a target is removed, its previously returned time series will be marked as -stale soon afterwards. +series that was previously present, this time series will be marked as stale. +If a target is removed, the previously retrieved time series will be marked as +stale soon after removal. If a query is evaluated at a sampling timestamp after a time series is marked -stale, then no value is returned for that time series. If new samples are -subsequently ingested for that time series, they will be returned as normal. +as stale, then no value is returned for that time series. If new samples are +subsequently ingested for that time series, they will be returned as expected. -If no sample is found (by default) 5 minutes before a sampling timestamp, -no value is returned for that time series at this point in time. This -effectively means that time series "disappear" from graphs at times where their -latest collected sample is older than 5 minutes or after they are marked stale. +A time series will go stale when it is no longer exported, or the target no +longer exists. Such time series will disappear from graphs +at the times of their latest collected sample, and they will not be returned +in queries after they are marked stale. -Staleness will not be marked for time series that have timestamps included in -their scrapes. Only the 5 minute threshold will be applied in that case. +Some exporters, which put their own timestamps on samples, get a different behaviour: +series that stop being exported take the last value for (by default) 5 minutes before +disappearing. The `track_timestamps_staleness` setting can change this. ### Avoiding slow queries and overloads -If a query needs to operate on a very large amount of data, graphing it might +If a query needs to operate on a substantial amount of data, graphing it might time out or overload the server or browser. Thus, when constructing queries over unknown data, always start building the query in the tabular view of Prometheus's expression browser until the result set seems reasonable @@ -336,7 +401,7 @@ rule](../configuration/recording_rules.md#recording-rules). This is especially relevant for Prometheus's query language, where a bare metric name selector like `api_http_requests_total` could expand to thousands -of time series with different labels. Also keep in mind that expressions which +of time series with different labels. 
Also, keep in mind that expressions that aggregate over many time series will generate load on the server even if the output is only a small number of time series. This is similar to how it would be slow to sum all values of a column in a relational database, even if the diff --git a/docs/querying/examples.md b/docs/querying/examples.md index a4fd373bf5..8287ff6f62 100644 --- a/docs/querying/examples.md +++ b/docs/querying/examples.md @@ -39,7 +39,7 @@ To select all HTTP status codes except 4xx ones, you could run: ## Subquery -Return the 5-minute rate of the `http_requests_total` metric for the past 30 minutes, with a resolution of 1 minute. +Return the 5-minute [rate](./functions.md#rate) of the `http_requests_total` metric for the past 30 minutes, with a resolution of 1 minute. rate(http_requests_total[5m])[30m:1m] @@ -95,3 +95,13 @@ Assuming this metric contains one time series per running instance, you could count the number of running instances per application like this: count by (app) (instance_cpu_time_ns) + +If we are exploring some metrics for their labels, to e.g. be able to aggregate +over some of them, we could use the following: + + limitk(10, app_foo_metric_bar) + +Alternatively, if we wanted the returned timeseries to be more evenly sampled, +we could use the following to get approximately 10% of them: + + limit_ratio(0.1, app_foo_metric_bar) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 151a1c10b6..b6f2181803 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -79,7 +79,12 @@ labels of the 1-element output vector from the input vector. ## `ceil()` `ceil(v instant-vector)` rounds the sample values of all elements in `v` up to -the nearest integer. +the nearest integer value greater than or equal to v. + +* `ceil(+Inf) = +Inf` +* `ceil(±0) = ±0` +* `ceil(1.49) = 2.0` +* `ceil(1.78) = 2.0` ## `changes()` @@ -93,8 +98,9 @@ vector. clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`. Special cases: -- Return an empty vector if `min > max` -- Return `NaN` if `min` or `max` is `NaN` + +* Return an empty vector if `min > max` +* Return `NaN` if `min` or `max` is `NaN` ## `clamp_max()` @@ -145,7 +151,7 @@ delta(cpu_temp_celsius{host="zeus"}[2h]) ``` `delta` acts on native histograms by calculating a new histogram where each -compononent (sum and count of observations, buckets) is the difference between +component (sum and count of observations, buckets) is the difference between the respective component in the first and last native histogram in `v`. However, each element in `v` that contains a mix of float and native histogram samples within the range, will be missing from the result vector. @@ -173,7 +179,33 @@ Special cases are: ## `floor()` `floor(v instant-vector)` rounds the sample values of all elements in `v` down -to the nearest integer. +to the nearest integer value smaller than or equal to v. + +* `floor(+Inf) = +Inf` +* `floor(±0) = ±0` +* `floor(1.49) = 1.0` +* `floor(1.78) = 1.0` + +## `histogram_avg()` + +_This function only acts on native histograms, which are an experimental +feature. The behavior of this function may change in future versions of +Prometheus, including its removal from PromQL._ + +`histogram_avg(v instant-vector)` returns the arithmetic average of observed values stored in +a native histogram. Samples that are not native histograms are ignored and do +not show up in the returned vector. 
+ +Use `histogram_avg` as demonstrated below to compute the average request duration +over a 5-minute window from a native histogram: + + histogram_avg(rate(http_request_duration_seconds[5m])) + +Which is equivalent to the following query: + + histogram_sum(rate(http_request_duration_seconds[5m])) + / + histogram_count(rate(http_request_duration_seconds[5m])) ## `histogram_count()` and `histogram_sum()` @@ -193,13 +225,6 @@ Use `histogram_count` in the following way to calculate a rate of observations histogram_count(rate(http_request_duration_seconds[10m])) -The additional use of `histogram_sum` enables the calculation of the average of -observed values (in this case corresponding to “average request duration”): - - histogram_sum(rate(http_request_duration_seconds[10m])) - / - histogram_count(rate(http_request_duration_seconds[10m])) - ## `histogram_fraction()` _This function only acts on native histograms, which are an experimental @@ -238,23 +263,23 @@ boundaries are inclusive or exclusive. ## `histogram_quantile()` `histogram_quantile(φ scalar, b instant-vector)` calculates the φ-quantile (0 ≤ -φ ≤ 1) from a [conventional +φ ≤ 1) from a [classic histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) or from a native histogram. (See [histograms and summaries](https://prometheus.io/docs/practices/histograms) for a detailed -explanation of φ-quantiles and the usage of the (conventional) histogram metric +explanation of φ-quantiles and the usage of the (classic) histogram metric type in general.) _Note that native histograms are an experimental feature. The behavior of this function when dealing with native histograms may change in future versions of Prometheus._ -The conventional float samples in `b` are considered the counts of observations -in each bucket of one or more conventional histograms. Each float sample must -have a label `le` where the label value denotes the inclusive upper bound of -the bucket. (Float samples without such a label are silently ignored.) The -other labels and the metric name are used to identify the buckets belonging to -each conventional histogram. The [histogram metric +The float samples in `b` are considered the counts of observations in each +bucket of one or more classic histograms. Each float sample must have a label +`le` where the label value denotes the inclusive upper bound of the bucket. +(Float samples without such a label are silently ignored.) The other labels and +the metric name are used to identify the buckets belonging to each classic +histogram. The [histogram metric type](https://prometheus.io/docs/concepts/metric_types/#histogram) automatically provides time series with the `_bucket` suffix and the appropriate labels. @@ -262,17 +287,17 @@ appropriate labels. The native histogram samples in `b` are treated each individually as a separate histogram to calculate the quantile from. -As long as no naming collisions arise, `b` may contain a mix of conventional +As long as no naming collisions arise, `b` may contain a mix of classic and native histograms. Use the `rate()` function to specify the time window for the quantile calculation. Example: A histogram metric is called `http_request_duration_seconds` (and -therefore the metric name for the buckets of a conventional histogram is +therefore the metric name for the buckets of a classic histogram is `http_request_duration_seconds_bucket`). 
To calculate the 90th percentile of request durations over the last 10m, use the following expression in case -`http_request_duration_seconds` is a conventional histogram: +`http_request_duration_seconds` is a classic histogram: histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[10m])) @@ -283,9 +308,9 @@ For a native histogram, use the following expression instead: The quantile is calculated for each label combination in `http_request_duration_seconds`. To aggregate, use the `sum()` aggregator around the `rate()` function. Since the `le` label is required by -`histogram_quantile()` to deal with conventional histograms, it has to be +`histogram_quantile()` to deal with classic histograms, it has to be included in the `by` clause. The following expression aggregates the 90th -percentile by `job` for conventional histograms: +percentile by `job` for classic histograms: histogram_quantile(0.9, sum by (job, le) (rate(http_request_duration_seconds_bucket[10m]))) @@ -293,7 +318,7 @@ When aggregating native histograms, the expression simplifies to: histogram_quantile(0.9, sum by (job) (rate(http_request_duration_seconds[10m]))) -To aggregate all conventional histograms, specify only the `le` label: +To aggregate all classic histograms, specify only the `le` label: histogram_quantile(0.9, sum by (le) (rate(http_request_duration_seconds_bucket[10m]))) @@ -301,38 +326,101 @@ With native histograms, aggregating everything works as usual without any `by` c histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m]))) -The `histogram_quantile()` function interpolates quantile values by -assuming a linear distribution within a bucket. +In the (common) case that a quantile value does not coincide with a bucket +boundary, the `histogram_quantile()` function interpolates the quantile value +within the bucket the quantile value falls into. For classic histograms, for +native histograms with custom bucket boundaries, and for the zero bucket of +other native histograms, it assumes a uniform distribution of observations +within the bucket (also called _linear interpolation_). For the +non-zero-buckets of native histograms with a standard exponential bucketing +schema, the interpolation is done under the assumption that the samples within +the bucket are distributed in a way that they would uniformly populate the +buckets in a hypothetical histogram with higher resolution. (This is also +called _exponential interpolation_.) If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned. -The following is only relevant for conventional histograms: If `b` contains -fewer than two buckets, `NaN` is returned. The highest bucket must have an -upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located -in the highest bucket, the upper bound of the second highest bucket is -returned. A lower limit of the lowest bucket is assumed to be 0 if the upper -bound of that bucket is greater than -0. In that case, the usual linear interpolation is applied within that -bucket. Otherwise, the upper bound of the lowest bucket is returned for -quantiles located in the lowest bucket. +Special cases for classic histograms: + +* If `b` contains fewer than two buckets, `NaN` is returned. +* The highest bucket must have an upper bound of `+Inf`. (Otherwise, `NaN` is + returned.) +* If a quantile is located in the highest bucket, the upper bound of the second + highest bucket is returned. 
+* The lower limit of the lowest bucket is assumed to be 0 if the upper bound of + that bucket is greater than 0. In that case, the usual linear interpolation + is applied within that bucket. Otherwise, the upper bound of the lowest + bucket is returned for quantiles located in the lowest bucket. + +Special cases for native histograms (relevant for the exact interpolation +happening within the zero bucket): + +* A zero bucket with finite width is assumed to contain no negative + observations if the histogram has observations in positive buckets, but none + in negative buckets. +* A zero bucket with finite width is assumed to contain no positive + observations if the histogram has observations in negative buckets, but none + in positive buckets. + +You can use `histogram_quantile(0, v instant-vector)` to get the estimated +minimum value stored in a histogram. + +You can use `histogram_quantile(1, v instant-vector)` to get the estimated +maximum value stored in a histogram. + +Buckets of classic histograms are cumulative. Therefore, the following should +always be the case: + +* The counts in the buckets are monotonically increasing (strictly + non-decreasing). +* A lack of observations between the upper limits of two consecutive buckets + results in equal counts in those two buckets. + +However, floating point precision issues (e.g. small discrepancies introduced +by computing of buckets with `sum(rate(...))`) or invalid data might violate +these assumptions. In that case, `histogram_quantile` would be unable to return +meaningful results. To mitigate the issue, `histogram_quantile` assumes that +tiny relative differences between consecutive buckets are happening because of +floating point precision errors and ignores them. (The threshold to ignore a +difference between two buckets is a trillionth (1e-12) of the sum of both +buckets.) Furthermore, if there are non-monotonic bucket counts even after this +adjustment, they are increased to the value of the previous buckets to enforce +monotonicity. The latter is evidence for an actual issue with the input data +and is therefore flagged with an informational annotation reading `input to +histogram_quantile needed to be fixed for monotonicity`. If you encounter this +annotation, you should find and remove the source of the invalid data. + +## `histogram_stddev()` and `histogram_stdvar()` -You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in -a histogram. +_Both functions only act on native histograms, which are an experimental +feature. The behavior of these functions may change in future versions of +Prometheus, including their removal from PromQL._ + +`histogram_stddev(v instant-vector)` returns the estimated standard deviation +of observations in a native histogram, based on the geometric mean of the buckets +where the observations lie. Samples that are not native histograms are ignored and +do not show up in the returned vector. -You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in -a histogram. +Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard +variance of observations in a native histogram. 
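For example, a sketch of estimating the spread of request durations over the last five minutes from a native histogram (the metric name is assumed for illustration):

    histogram_stddev(rate(http_request_duration_seconds[5m]))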
+## `double_exponential_smoothing()` -## `holt_winters()` +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** -`holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value +`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a smoothed value for time series based on the range in `v`. The lower the smoothing factor `sf`, the more importance is given to old data. The higher the trend factor `tf`, the more trends in the data is considered. Both `sf` and `tf` must be between 0 and 1. +For additional details, refer to [NIST Engineering Statistics Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm). +In Prometheus V2 this function was called `holt_winters`. This caused confusion +since the Holt-Winters method usually refers to triple exponential smoothing. +Double exponential smoothing as implemented here is also referred to as "Holt +Linear". -`holt_winters` should only be used with gauges. +`double_exponential_smoothing` should only be used with gauges. ## `hour()` @@ -495,7 +583,7 @@ rate(http_requests_total{job="api-server"}[5m]) ``` `rate` acts on native histograms by calculating a new histogram where each -compononent (sum and count of observations, buckets) is the rate of increase +component (sum and count of observations, buckets) is the rate of increase between the respective component in the first and last native histogram in `v`. However, each element in `v` that contains a mix of float and native histogram samples within the range, will be missing from the result vector. @@ -551,10 +639,34 @@ have exactly one element, `scalar` will return `NaN`. `sort(v instant-vector)` returns vector elements sorted by their sample values, in ascending order. Native histograms are sorted by their sum of observations. +Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering. + ## `sort_desc()` Same as `sort`, but sorts in descending order. +Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering. + +## `sort_by_label()` + +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** + +`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets. + +Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. + +This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order). + +## `sort_by_label_desc()` + +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** + +Same as `sort_by_label`, but sorts in descending order. + +Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. + +This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order). + ## `sqrt()` `sqrt(v instant-vector)` calculates the square root of all elements in `v`. 
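As a short illustration of the label-based sorting described above (a sketch using the standard `up` metric), the following returns all series ordered by their `instance` label, falling back to the full label set for ties:

    sort_by_label(up, "instance")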
@@ -596,6 +708,12 @@ over time and return an instant vector with per-series aggregation results: * `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. +If the [feature flag](../feature_flags.md#experimental-promql-functions) +`--enable-feature=promql-experimental-functions` is set, the following +additional functions are available: + +* `mad_over_time(range-vector)`: the median absolute deviation of all points in the specified interval. + Note that all values in the specified interval have the same weight in the aggregation even if the values are not equally spaced throughout the interval. @@ -607,21 +725,21 @@ ignore histogram samples. The trigonometric functions work in radians: -- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)). -- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)). -- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)). -- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)). -- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)). -- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)). -- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)). -- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)). -- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)). -- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)). -- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)). -- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)). +* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)). +* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)). +* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)). +* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)). +* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)). +* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)). +* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)). +* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)). 
+* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
+* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
+* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
+* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
 
 The following are useful for converting between degrees and radians:
 
-- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
-- `pi()`: returns pi.
-- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
+* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
+* `pi()`: returns pi.
+* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
diff --git a/docs/querying/operators.md b/docs/querying/operators.md
index 2a9376d77b..f5f217ff63 100644
--- a/docs/querying/operators.md
+++ b/docs/querying/operators.md
@@ -230,6 +230,8 @@ vector of fewer elements with aggregated values:
 * `bottomk` (smallest k elements by sample value)
 * `topk` (largest k elements by sample value)
 * `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
+* `limitk` (sample n elements)
+* `limit_ratio` (deterministically sample approximately a ratio 𝑟 of the elements if `𝑟 > 0`; a negative parameter `-(1.0 - 𝑟)` selects the complement of the elements selected by `𝑟`)
 
 These operators can either be used to aggregate over **all** label dimensions
 or preserve distinct dimensions by including a `without` or `by` clause. These
@@ -249,8 +251,8 @@ all other labels are preserved in the output. `by` does the opposite and drops
 labels that are not listed in the `by` clause, even if their label values are
 identical between all elements of the vector.
 
-`parameter` is only required for `count_values`, `quantile`, `topk` and
-`bottomk`.
+`parameter` is only required for `count_values`, `quantile`, `topk`,
+`bottomk`, `limitk` and `limit_ratio`.
 
 `count_values` outputs one time series per unique sample value. Each series
 has an additional label. The name of that label is given by the aggregation
@@ -261,11 +263,16 @@ time series is the number of times that sample value was present.
 the input samples, including the original labels, are returned in the result
 vector. `by` and `without` are only used to bucket the input vector.
 
+`limitk` and `limit_ratio` also return a subset of the input samples,
+including the original labels in the result vector. These are experimental
+operators that must be enabled with `--enable-feature=promql-experimental-functions`.
+
 `quantile` calculates the φ-quantile, the value that ranks at number φ*N among
 the N metric values of the dimensions aggregated over. φ is provided as the
 aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
 `quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned.
 For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.
+ Example: If the metric `http_requests_total` had time series that fan out by @@ -291,6 +298,33 @@ To get the 5 largest HTTP requests counts across all instances we could write: topk(5, http_requests_total) +To sample 10 timeseries, for example to inspect labels and their values, we +could write: + + limitk(10, http_requests_total) + +To deterministically sample approximately 10% of timeseries we could write: + + limit_ratio(0.1, http_requests_total) + +Given that `limit_ratio()` implements a deterministic sampling algorithm (based +on labels' hash), you can get the _complement_ of the above samples, i.e. +approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)` +with: + + limit_ratio(-0.9, http_requests_total) + +You can also use this feature to e.g. verify that `avg()` is a representative +aggregation for your samples' values, by checking that the difference between +averaging two samples' subsets is "small" when compared to the standard +deviation. + + abs( + avg(limit_ratio(0.5, http_requests_total)) + - + avg(limit_ratio(-0.5, http_requests_total)) + ) <= bool stddev(http_requests_total) + ## Binary operator precedence The following list shows the precedence of binary operators in Prometheus, from @@ -310,7 +344,7 @@ so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`. ## Operators for native histograms Native histograms are an experimental feature. Ingesting native histograms has -to be enabled via a [feature flag](../feature_flags.md#native-histograms). Once +to be enabled via a [feature flag](../../feature_flags.md#native-histograms). Once native histograms have been ingested, they can be queried (even after the feature flag has been disabled again). However, the operator support for native histograms is still very limited. diff --git a/docs/querying/remote_read_api.md b/docs/querying/remote_read_api.md index e3dd133069..efbd08e984 100644 --- a/docs/querying/remote_read_api.md +++ b/docs/querying/remote_read_api.md @@ -5,63 +5,7 @@ sort_rank: 7 # Remote Read API -This is not currently considered part of the stable API and is subject to change -even between non-major version releases of Prometheus. - -## Format overview - -The API response format is JSON. Every successful API request returns a `2xx` -status code. - -Invalid requests that reach the API handlers return a JSON error object -and one of the following HTTP response codes: - -- `400 Bad Request` when parameters are missing or incorrect. -- `422 Unprocessable Entity` when an expression can't be executed - ([RFC4918](https://tools.ietf.org/html/rfc4918#page-78)). -- `503 Service Unavailable` when queries time out or abort. - -Other non-`2xx` codes may be returned for errors occurring before the API -endpoint is reached. - -An array of warnings may be returned if there are errors that do -not inhibit the request execution. All of the data that was successfully -collected will be returned in the data field. - -The JSON response envelope format is as follows: - -``` -{ - "status": "success" | "error", - "data": , - - // Only set if status is "error". The data field may still hold - // additional data. - "errorType": "", - "error": "", - - // Only if there were warnings while executing the request. - // There will still be data in the data field. 
- "warnings": [""] -} -``` - -Generic placeholders are defined as follows: - -* ``: Input timestamps may be provided either in -[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp -in seconds, with optional decimal places for sub-second precision. Output -timestamps are always represented as Unix timestamps in seconds. -* ``: Prometheus [time series -selectors](basics.md#time-series-selectors) like `http_requests_total` or -`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded. -* ``: [Prometheus duration strings](basics.md#time_durations). -For example, `5m` refers to a duration of 5 minutes. -* ``: boolean values (strings `true` and `false`). - -Note: Names of query parameters that may be repeated end with `[]`. - -## Remote Read API +> This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus. This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression. The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto). @@ -79,5 +23,3 @@ This returns a message that includes a list of raw samples. These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second. - - diff --git a/docs/storage.md b/docs/storage.md index bcb8f7853e..2142c970ff 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -17,9 +17,9 @@ Ingested samples are grouped into blocks of two hours. Each two-hour block consi of a directory containing a chunks subdirectory containing all the time series samples for that window of time, a metadata file, and an index file (which indexes metric names and labels to time series in the chunks directory). The samples in the chunks directory -are grouped together into one or more segment files of up to 512MB each by default. When series are -deleted via the API, deletion records are stored in separate tombstone files (instead -of deleting the data immediately from the chunk segments). +are grouped together into one or more segment files of up to 512MB each by default. When +series are deleted via the API, deletion records are stored in separate tombstone files +(instead of deleting the data immediately from the chunk segments). The current block for incoming samples is kept in memory and is not fully persisted. It is secured against crashes by a write-ahead log (WAL) that can be @@ -58,15 +58,20 @@ A Prometheus server's data directory looks something like this:    └── 00000000 ``` - Note that a limitation of local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of drive or node outages and should be managed like any other single node -database. The use of RAID is suggested for storage availability, and [snapshots](querying/api.md#snapshot) -are recommended for backups. With proper +database. + +[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups +made without snapshots run the risk of losing data that was recorded since +the last WAL sync, which typically happens every two hours. With proper architecture, it is possible to retain years of data in local storage. 
-Alternatively, external storage may be used via the [remote read/write APIs](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). Careful evaluation is required for these systems as they vary greatly in durability, performance, and efficiency. +Alternatively, external storage may be used via the +[remote read/write APIs](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). +Careful evaluation is required for these systems as they vary greatly in durability, +performance, and efficiency. For further details on file format, see [TSDB format](/tsdb/docs/format/README.md). @@ -74,40 +79,73 @@ For further details on file format, see [TSDB format](/tsdb/docs/format/README.m The initial two-hour blocks are eventually compacted into longer blocks in the background. -Compaction will create larger blocks containing data spanning up to 10% of the retention time, or 31 days, whichever is smaller. +Compaction will create larger blocks containing data spanning up to 10% of the retention time, +or 31 days, whichever is smaller. ## Operational aspects Prometheus has several flags that configure local storage. The most important are: -* `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. -* `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`. Overrides `storage.tsdb.retention` if this flag is set to anything other than default. -* `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only the persistent blocks are deleted to honor this retention although WAL and m-mapped chunks are counted in the total size. So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours). -* `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`. -* `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. Note that once enabled, downgrading Prometheus to a version below 2.11.0 will require deleting the WAL. - -Prometheus stores an average of only 1-2 bytes per sample. Thus, to plan the capacity of a Prometheus server, you can use the rough formula: +- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. +- `--storage.tsdb.retention.time`: How long to retain samples in storage. If neither + this flag nor `storage.tsdb.retention.size` is set, the retention time defaults to + `15d`. Supported units: y, w, d, h, m, s, ms. +- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. + The oldest data will be removed first. Defaults to `0` or disabled. Units supported: + B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only + the persistent blocks are deleted to honor this retention although WAL and m-mapped + chunks are counted in the total size. So the minimum requirement for the disk is the + peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` + (m-mapped Head chunks) directory combined (peaks every 2 hours). 
+- `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL).
+  Depending on your data, you can expect the WAL size to be halved with little extra
+  CPU load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0.
+  Note that once enabled, downgrading Prometheus to a version below 2.11.0 will
+  require deleting the WAL.
+
+Prometheus stores an average of only 1-2 bytes per sample. Thus, to plan the
+capacity of a Prometheus server, you can use the rough formula:
 
 ```
 needed_disk_space = retention_time_seconds * ingested_samples_per_second * bytes_per_sample
 ```
 
-To lower the rate of ingested samples, you can either reduce the number of time series you scrape (fewer targets or fewer series per target), or you can increase the scrape interval. However, reducing the number of series is likely more effective, due to compression of samples within a series.
+To lower the rate of ingested samples, you can either reduce the number of
+time series you scrape (fewer targets or fewer series per target), or you
+can increase the scrape interval. However, reducing the number of series is
+likely more effective, due to compression of samples within a series.
 
 If your local storage becomes corrupted for whatever reason, the best
 strategy to address the problem is to shut down Prometheus then remove the
 entire storage directory. You can also try removing individual block directories,
-or the WAL directory to resolve the problem. Note that this means losing
+or the WAL directory to resolve the problem. Note that this means losing
 approximately two hours of data per block directory. Again, Prometheus's local
 storage is not intended to be durable long-term storage; external solutions
 offer extended retention and data durability.
 
-CAUTION: Non-POSIX compliant filesystems are not supported for Prometheus' local storage as unrecoverable corruptions may happen. NFS filesystems (including AWS's EFS) are not supported. NFS could be POSIX-compliant, but most implementations are not. It is strongly recommended to use a local filesystem for reliability.
+CAUTION: Non-POSIX compliant filesystems are not supported for Prometheus'
+local storage as unrecoverable corruptions may happen. NFS filesystems
+(including AWS's EFS) are not supported. NFS could be POSIX-compliant,
+but most implementations are not. It is strongly recommended to use a
+local filesystem for reliability.
 
 If both time and size retention policies are specified, whichever triggers first
 will be used.
 
-Expired block cleanup happens in the background. It may take up to two hours to remove expired blocks. Blocks must be fully expired before they are removed.
+Expired block cleanup happens in the background. It may take up to two hours
+to remove expired blocks. Blocks must be fully expired before they are removed.
+
+## Right-Sizing Retention Size
+
+If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
+will want to consider the right size for this value relative to the storage you
+have allocated for Prometheus. It is wise to reduce the retention size to provide
+a buffer, ensuring that older entries will be removed before the allocated storage
+for Prometheus becomes full.
+
+At present, we recommend setting the retention size to, at most, 80-85% of your
+allocated Prometheus disk space. This increases the likelihood that older entries
+will be removed prior to hitting any disk limitations.
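+
+As a worked example of the capacity formula above (the numbers are
+illustrative, not recommendations): a server ingesting 100,000 samples per
+second with a 15-day retention time needs roughly
+
+```
+15 * 86400 * 100000 * 2 bytes ≈ 259 GB
+```
+
+of disk space. Following the 80-85% guideline, allocating a 300 GB volume to
+such a server would suggest a retention size of roughly 240-255 GB.
+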
## Remote storage integrations @@ -117,61 +155,107 @@ a set of interfaces that allow integrating with remote storage systems. ### Overview -Prometheus integrates with remote storage systems in three ways: +Prometheus integrates with remote storage systems in four ways: -* Prometheus can write samples that it ingests to a remote URL in a standardized format. -* Prometheus can receive samples from other Prometheus servers in a standardized format. -* Prometheus can read (back) sample data from a remote URL in a standardized format. +- Prometheus can write samples that it ingests to a remote URL in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/). +- Prometheus can receive samples from other clients in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/). +- Prometheus can read (back) sample data from a remote URL in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31). +- Prometheus can return sample data requested by clients in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31). ![Remote read and write architecture](images/remote_integrations.png) -The read and write protocols both use a snappy-compressed protocol buffer encoding over HTTP. The protocols are not considered as stable APIs yet and may change to use gRPC over HTTP/2 in the future, when all hops between Prometheus and the remote storage can safely be assumed to support HTTP/2. +The remote read and write protocols both use a snappy-compressed protocol buffer encoding over +HTTP. The read protocol is not yet considered as stable API. -For details on configuring remote storage integrations in Prometheus, see the [remote write](configuration/configuration.md#remote_write) and [remote read](configuration/configuration.md#remote_read) sections of the Prometheus configuration documentation. +The write protocol has a [stable specification for 1.0 version](https://prometheus.io/docs/specs/remote_write_spec/) +and [experimental specification for 2.0 version](https://prometheus.io/docs/specs/remote_write_spec_2_0/), +both supported by Prometheus server. -The built-in remote write receiver can be enabled by setting the `--web.enable-remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`. +For details on configuring remote storage integrations in Prometheus as a client, see the +[remote write](configuration/configuration.md#remote_write) and +[remote read](configuration/configuration.md#remote_read) sections of the Prometheus +configuration documentation. -For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto). +Note that on the read path, Prometheus only fetches raw series data for a set of +label selectors and time ranges from the remote end. All PromQL evaluation on the +raw data still happens in Prometheus itself. This means that remote read queries +have some scalability limit, since all necessary data needs to be loaded into the +querying Prometheus server first and then processed there. However, supporting +fully distributed evaluation of PromQL was deemed infeasible for the time being. -Note that on the read path, Prometheus only fetches raw series data for a set of label selectors and time ranges from the remote end. All PromQL evaluation on the raw data still happens in Prometheus itself. 
This means that remote read queries have some scalability limit, since all necessary data needs to be loaded into the querying Prometheus server first and then processed there. However, supporting fully distributed evaluation of PromQL was deemed infeasible for the time being. +Prometheus also serves both protocols. The built-in remote write receiver can be enabled +by setting the `--web.enable-remote-write-receiver` command line flag. When enabled, +the remote write receiver endpoint is `/api/v1/write`. The remote read endpoint is +available on [`/api/v1/read`](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/). ### Existing integrations -To learn more about existing integrations with remote storage systems, see the [Integrations documentation](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). +To learn more about existing integrations with remote storage systems, see the +[Integrations documentation](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). ## Backfilling from OpenMetrics format ### Overview -If a user wants to create blocks into the TSDB from data that is in [OpenMetrics](https://openmetrics.io/) format, they can do so using backfilling. However, they should be careful and note that it is not safe to backfill data from the last 3 hours (the current head block) as this time range may overlap with the current head block Prometheus is still mutating. Backfilling will create new TSDB blocks, each containing two hours of metrics data. This limits the memory requirements of block creation. Compacting the two hour blocks into larger blocks is later done by the Prometheus server itself. +If a user wants to create blocks into the TSDB from data that is in +[OpenMetrics](https://openmetrics.io/) format, they can do so using backfilling. +However, they should be careful and note that it is not safe to backfill data +from the last 3 hours (the current head block) as this time range may overlap +with the current head block Prometheus is still mutating. Backfilling will +create new TSDB blocks, each containing two hours of metrics data. This limits +the memory requirements of block creation. Compacting the two hour blocks into +larger blocks is later done by the Prometheus server itself. + +A typical use case is to migrate metrics data from a different monitoring system +or time-series database to Prometheus. To do so, the user must first convert the +source data into [OpenMetrics](https://openmetrics.io/) format, which is the +input format for the backfilling as described below. -A typical use case is to migrate metrics data from a different monitoring system or time-series database to Prometheus. To do so, the user must first convert the source data into [OpenMetrics](https://openmetrics.io/) format, which is the input format for the backfilling as described below. +Note that native histograms and staleness markers are not supported by this +procedure, as they cannot be represented in the OpenMetrics format. ### Usage -Backfilling can be used via the Promtool command line. Promtool will write the blocks to a directory. By default this output directory is ./data/, you can change it by using the name of the desired output directory as an optional argument in the sub-command. +Backfilling can be used via the Promtool command line. Promtool will write the blocks +to a directory. 
By default this output directory is ./data/; you can change it by
+using the name of the desired output directory as an optional argument in the sub-command.
 
 ```
 promtool tsdb create-blocks-from openmetrics <input file> [<output directory>]
 ```
 
-After the creation of the blocks, move it to the data directory of Prometheus. If there is an overlap with the existing blocks in Prometheus, the flag `--storage.tsdb.allow-overlapping-blocks` needs to be set for Prometheus versions v2.38 and below. Note that any backfilled data is subject to the retention configured for your Prometheus server (by time or size).
+After the creation of the blocks, move them to the data directory of Prometheus.
+If there is an overlap with the existing blocks in Prometheus, the flag
+`--storage.tsdb.allow-overlapping-blocks` needs to be set for Prometheus versions
+v2.38 and below. Note that any backfilled data is subject to the retention
+configured for your Prometheus server (by time or size).
 
 #### Longer Block Durations
 
-By default, the promtool will use the default block duration (2h) for the blocks; this behavior is the most generally applicable and correct. However, when backfilling data over a long range of times, it may be advantageous to use a larger value for the block duration to backfill faster and prevent additional compactions by TSDB later.
+By default, promtool will use the default block duration (2h) for the blocks;
+this behavior is the most generally applicable and correct. However, when backfilling
+data over a long range of times, it may be advantageous to use a larger value for
+the block duration to backfill faster and prevent additional compactions by TSDB later.
 
-The `--max-block-duration` flag allows the user to configure a maximum duration of blocks. The backfilling tool will pick a suitable block duration no larger than this.
+The `--max-block-duration` flag allows the user to configure a maximum duration of blocks.
+The backfilling tool will pick a suitable block duration no larger than this.
 
-While larger blocks may improve the performance of backfilling large datasets, drawbacks exist as well. Time-based retention policies must keep the entire block around if even one sample of the (potentially large) block is still within the retention policy. Conversely, size-based retention policies will remove the entire block even if the TSDB only goes over the size limit in a minor way.
+While larger blocks may improve the performance of backfilling large datasets,
+drawbacks exist as well. Time-based retention policies must keep the entire block
+around if even one sample of the (potentially large) block is still within the
+retention policy. Conversely, size-based retention policies will remove the entire
+block even if the TSDB only goes over the size limit in a minor way.
 
-Therefore, backfilling with few blocks, thereby choosing a larger block duration, must be done with care and is not recommended for any production instances.
+Therefore, backfilling with few blocks, thereby choosing a larger block duration,
+must be done with care and is not recommended for any production instances.
 
 ## Backfilling for Recording Rules
 
 ### Overview
 
-When a new recording rule is created, there is no historical data for it. Recording rule data only exists from the creation time on. `promtool` makes it possible to create historical recording rule data.
+When a new recording rule is created, there is no historical data for it.
+Recording rule data only exists from the creation time on.
+`promtool` makes it possible to create historical recording rule data.
 
 ### Usage
 
@@ -187,14 +271,26 @@ $ promtool tsdb create-blocks-from rules \
     rules.yaml rules2.yaml
 ```
 
-The recording rule files provided should be a normal [Prometheus rules file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).
+The recording rule files provided should be a normal
+[Prometheus rules file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).
 
-The output of `promtool tsdb create-blocks-from rules` command is a directory that contains blocks with the historical rule data for all rules in the recording rule files. By default, the output directory is `data/`. In order to make use of this new block data, the blocks must be moved to a running Prometheus instance data dir `storage.tsdb.path` (for Prometheus versions v2.38 and below, the flag `--storage.tsdb.allow-overlapping-blocks` must be enabled). Once moved, the new blocks will merge with existing blocks when the next compaction runs.
+The output of the `promtool tsdb create-blocks-from rules` command is a directory that
+contains blocks with the historical rule data for all rules in the recording rule
+files. By default, the output directory is `data/`. In order to make use of this
+new block data, the blocks must be moved to a running Prometheus instance data dir
+`storage.tsdb.path` (for Prometheus versions v2.38 and below, the flag
+`--storage.tsdb.allow-overlapping-blocks` must be enabled). Once moved, the new
+blocks will merge with existing blocks when the next compaction runs.
 
 ### Limitations
 
-- If you run the rule backfiller multiple times with the overlapping start/end times, blocks containing the same data will be created each time the rule backfiller is run.
+- If you run the rule backfiller multiple times with overlapping start/end times,
+  blocks containing the same data will be created each time the rule backfiller is run.
 - All rules in the recording rule files will be evaluated.
-- If the `interval` is set in the recording rule file that will take priority over the `eval-interval` flag in the rule backfill command.
+- If the `interval` is set in the recording rule file, it will take priority over
+  the `eval-interval` flag in the rule backfill command.
 - Alerts are currently ignored if they are in the recording rule file.
-- Rules in the same group cannot see the results of previous rules. Meaning that rules that refer to other rules being backfilled is not supported. A workaround is to backfill multiple times and create the dependent data first (and move dependent data to the Prometheus server data dir so that it is accessible from the Prometheus API).
+- Rules in the same group cannot see the results of previous rules. This means that
+  rules that refer to other rules being backfilled are not supported. A workaround is to
+  backfill multiple times and create the dependent data first (and move dependent
+  data to the Prometheus server data dir so that it is accessible from the Prometheus API).
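+
+For reference, a minimal recording rules file that could be passed to
+`promtool tsdb create-blocks-from rules` might look as follows (the group,
+rule, and metric names are hypothetical):
+
+```
+groups:
+  - name: example
+    rules:
+      - record: job:http_requests_total:rate5m
+        expr: sum by (job) (rate(http_requests_total[5m]))
+```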
diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go
index ae656db193..8ccbafe6f1 100644
--- a/documentation/examples/custom-sd/adapter-usage/main.go
+++ b/documentation/examples/custom-sd/adapter-usage/main.go
@@ -28,8 +28,10 @@ import (
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 
+	prom_discovery "github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/documentation/examples/custom-sd/adapter"
 	"github.com/prometheus/prometheus/util/strutil"
@@ -125,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
 	// since the service may be registered remotely through a different node.
 	var addr string
 	if node.ServiceAddress != "" {
-		addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort))
+		addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
 	} else {
-		addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort))
+		addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
 	}
 
 	target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}
@@ -268,7 +270,16 @@ func main() {
 	if err != nil {
 		fmt.Println("err: ", err)
 	}
-	sdAdapter := adapter.NewAdapter(ctx, *outputFile, "exampleSD", disc, logger)
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := prom_discovery.NewRefreshMetrics(reg)
+	metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics)
+	if err != nil {
+		level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err)
+		os.Exit(1)
+	}
+
+	sdAdapter := adapter.NewAdapter(ctx, *outputFile, "exampleSD", disc, logger, metrics, reg)
 	sdAdapter.Run()
 
 	<-ctx.Done()
diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go
index 57c32ce492..dcf5a2b78c 100644
--- a/documentation/examples/custom-sd/adapter/adapter.go
+++ b/documentation/examples/custom-sd/adapter/adapter.go
@@ -25,6 +25,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -162,12 +163,12 @@ func (a *Adapter) Run() {
 }
 
 // NewAdapter creates a new instance of Adapter.
-func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
+func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter {
 	return &Adapter{
 		ctx:     ctx,
 		disc:    d,
 		groups:  make(map[string]*customSD),
-		manager: discovery.NewManager(ctx, logger),
+		manager: discovery.NewManager(ctx, logger, registerer, sdMetrics),
 		output:  file,
 		name:    name,
 		logger:  logger,
diff --git a/documentation/examples/custom-sd/adapter/adapter_test.go b/documentation/examples/custom-sd/adapter/adapter_test.go
index eaf34c6673..329ca8c29a 100644
--- a/documentation/examples/custom-sd/adapter/adapter_test.go
+++ b/documentation/examples/custom-sd/adapter/adapter_test.go
@@ -18,9 +18,11 @@ import (
 	"os"
 	"testing"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
@@ -226,6 +228,12 @@ func TestWriteOutput(t *testing.T) {
 	require.NoError(t, err)
 	defer os.Remove(tmpfile.Name())
 	tmpfile.Close()
-	adapter := NewAdapter(ctx, tmpfile.Name(), "test_sd", nil, nil)
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics)
+	require.NoError(t, err)
+
+	adapter := NewAdapter(ctx, tmpfile.Name(), "test_sd", nil, nil, sdMetrics, reg)
 
 	require.NoError(t, adapter.writeOutput())
 }
diff --git a/documentation/examples/prometheus-kubernetes.yml b/documentation/examples/prometheus-kubernetes.yml
index 9a62287342..ad7451c2d7 100644
--- a/documentation/examples/prometheus-kubernetes.yml
+++ b/documentation/examples/prometheus-kubernetes.yml
@@ -8,6 +8,11 @@
 # If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
 # for the kubernetes-cadvisor job; you will need to edit or remove this job.
 
+# Keep at most 100 sets of details of targets dropped by relabeling.
+# This information is used to display in the UI for troubleshooting.
+global:
+  keep_dropped_targets: 100
+
 # Scrape config for API servers.
 #
 # Kubernetes exposes API servers as endpoints to the default/kubernetes
diff --git a/documentation/examples/prometheus-linode.yml b/documentation/examples/prometheus-linode.yml
index 993b6a5c12..fe1a740028 100644
--- a/documentation/examples/prometheus-linode.yml
+++ b/documentation/examples/prometheus-linode.yml
@@ -12,6 +12,7 @@ scrape_configs:
     linode_sd_configs:
       - authorization:
           credentials: ""
+        region: "us-east"
     relabel_configs:
       # Only scrape targets that have a tag 'monitoring'.
       - source_labels: [__meta_linode_tags]
diff --git a/documentation/examples/prometheus-ovhcloud.yml b/documentation/examples/prometheus-ovhcloud.yml
index 21facad1ca..b2cc60af25 100644
--- a/documentation/examples/prometheus-ovhcloud.yml
+++ b/documentation/examples/prometheus-ovhcloud.yml
@@ -1,4 +1,4 @@
-# An example scrape configuration for running Prometheus with Ovhcloud.
+# An example scrape configuration for running Prometheus with OVHcloud.
scrape_configs:
   - job_name: 'ovhcloud'
     ovhcloud_sd_configs:
diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md
index 9748c448db..968d2b25cb 100644
--- a/documentation/examples/remote_storage/example_write_adapter/README.md
+++ b/documentation/examples/remote_storage/example_write_adapter/README.md
@@ -7,6 +7,7 @@ To use it:
 
 ```
 go build
+
 ./example_write_adapter
 ```
 
@@ -15,10 +16,19 @@ go build
 ```yaml
 remote_write:
   - url: "http://localhost:1234/receive"
+    protobuf_message: "io.prometheus.write.v2.Request"
+```
+
+or for the eventually deprecated Remote Write 1.0 message:
+
+```yaml
+remote_write:
+  - url: "http://localhost:1234/receive"
+    protobuf_message: "prometheus.WriteRequest"
 ```
 
-Then start Prometheus:
+Then start Prometheus (in a separate terminal):
 
 ```
-./prometheus
+./prometheus --enable-feature=metadata-wal-records
 ```
diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go
index 48c0a9571f..727a3056d3 100644
--- a/documentation/examples/remote_storage/example_write_adapter/server.go
+++ b/documentation/examples/remote_storage/example_write_adapter/server.go
@@ -18,44 +18,104 @@ import (
 	"log"
 	"net/http"
 
-	"github.com/prometheus/common/model"
-
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/storage/remote"
 )
 
 func main() {
 	http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
-		req, err := remote.DecodeWriteRequest(r.Body)
-		if err != nil {
-			http.Error(w, err.Error(), http.StatusBadRequest)
+		enc := r.Header.Get("Content-Encoding")
+		if enc == "" {
+			http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
+			return
+		}
+		if enc != "snappy" {
+			http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
 			return
 		}
 
-		for _, ts := range req.Timeseries {
-			m := make(model.Metric, len(ts.Labels))
-			for _, l := range ts.Labels {
-				m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-			}
-			fmt.Println(m)
+		contentType := r.Header.Get("Content-Type")
+		if contentType == "" {
+			http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
+			return
+		}
 
-			for _, s := range ts.Samples {
-				fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
-			}
+		defer func() { _ = r.Body.Close() }()
 
-			for _, e := range ts.Exemplars {
-				m := make(model.Metric, len(e.Labels))
-				for _, l := range e.Labels {
-					m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-				}
-				fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
+		// Very simplistic content parsing, see
+		// storage/remote/write_handler.go#WriteHandler.ServeHTTP for production example.
+ switch contentType { + case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest": + req, err := remote.DecodeWriteRequest(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return } + printV1(req) + case "application/x-protobuf;proto=io.prometheus.write.v2.Request": + req, err := remote.DecodeWriteV2Request(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + printV2(req) + default: + msg := fmt.Sprintf("Unknown remote write content type: %s", contentType) + fmt.Println(msg) + http.Error(w, msg, http.StatusBadRequest) + } + }) + log.Fatal(http.ListenAndServe(":1234", nil)) +} + +func printV1(req *prompb.WriteRequest) { + b := labels.NewScratchBuilder(0) + for _, ts := range req.Timeseries { + fmt.Println(ts.ToLabels(&b, nil)) - for _, hp := range ts.Histograms { - h := remote.HistogramProtoToHistogram(hp) + for _, s := range ts.Samples { + fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) + } + for _, ep := range ts.Exemplars { + e := ep.ToExemplar(&b, nil) + fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp) + } + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + h := hp.ToFloatHistogram() fmt.Printf("\tHistogram: %s\n", h.String()) + continue } + h := hp.ToIntHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) } - }) + } +} - log.Fatal(http.ListenAndServe(":1234", nil)) +func printV2(req *writev2.Request) { + b := labels.NewScratchBuilder(0) + for _, ts := range req.Timeseries { + l := ts.ToLabels(&b, req.Symbols) + m := ts.ToMetadata(req.Symbols) + fmt.Println(l, m) + + for _, s := range ts.Samples { + fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) + } + for _, ep := range ts.Exemplars { + e := ep.ToExemplar(&b, req.Symbols) + fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp) + } + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + h := hp.ToFloatHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + continue + } + h := hp.ToIntHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + } + } } diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index b4c8e077d6..a1be5c9b4e 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,68 +1,75 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.18 +go 1.22.0 require ( - github.com/alecthomas/kingpin/v2 v2.3.2 + github.com/alecthomas/kingpin/v2 v2.4.0 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.2 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/common v0.44.0 - github.com/prometheus/prometheus v0.45.0 - github.com/stretchr/testify v1.8.4 + github.com/influxdata/influxdb v1.11.6 + github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/common v0.60.0 + github.com/prometheus/prometheus v0.53.1 + github.com/stretchr/testify v1.9.0 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.276 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore 
v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.opentelemetry.io/collector/pdata v1.8.0 // indirect + go.opentelemetry.io/collector/semconv v0.101.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect + go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect - go.uber.org/goleak v1.2.1 // indirect - golang.org/x/crypto v0.8.0 // indirect - golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text 
v0.9.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apimachinery v0.29.3 // indirect + k8s.io/client-go v0.29.3 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect ) exclude ( @@ -72,4 +79,10 @@ exclude ( cloud.google.com/go v0.34.0 cloud.google.com/go v0.65.0 cloud.google.com/go v0.82.0 + + // Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules. + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 ) + +// TODO(bwplotka): Move to main branch commit or perhaps released version. +replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 2ade3a1b7c..936b448d84 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,62 +1,76 @@ -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/alecthomas/kingpin/v2 v2.3.2 
h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= -github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= +github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= 
-github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw= +github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -68,21 +82,26 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonreference v0.20.2 
h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= +github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -93,176 +112,225 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= +github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= -github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= +github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror 
v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= +github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= -github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= -github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= +github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU= +github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg= +github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= +github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= +github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= +github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= +github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= 
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= +github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI= -github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 
h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc= +github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/metric v1.16.0 
h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -275,24 +343,21 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -307,43 +372,38 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 
-golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -351,15 +411,17 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -371,13 +433,21 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= -k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= -k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go
index c896e1bd82..1386f46761 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go
@@ -69,16 +69,16 @@ const (
 //
 // Examples:
 //
-// "foo-bar-42" -> "foo-bar-42"
+//	"foo-bar-42" -> "foo-bar-42"
 //
-// "foo_bar%42" -> "foo_bar%2542"
+//	"foo_bar%42" -> "foo_bar%2542"
 //
-// "http://example.org:8080" -> "http:%2F%2Fexample%2Eorg:8080"
+//	"http://example.org:8080" -> "http:%2F%2Fexample%2Eorg:8080"
 //
-// "Björn's email: bjoern@soundcloud.com" ->
-// "Bj%C3%B6rn's%20email:%20bjoern%40soundcloud.com"
+//	"Björn's email: bjoern@soundcloud.com" ->
+//	"Bj%C3%B6rn's%20email:%20bjoern%40soundcloud.com"
 //
-// "日" -> "%E6%97%A5"
+//	"日" -> "%E6%97%A5"
 func escape(tv model.LabelValue) string {
 	length := len(tv)
 	result := bytes.NewBuffer(make([]byte, 0, length))
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go
index cb56514e4b..a738c01dcd 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go
@@ -74,7 +74,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
 
 	server := httptest.NewServer(http.HandlerFunc(
 		func(w http.ResponseWriter, r *http.Request) {
-			require.Equal(t, "POST", r.Method, "Unexpected method.")
+			require.Equal(t, http.MethodPost, r.Method, "Unexpected method.")
 			require.Equal(t, "/write", r.URL.Path, "Unexpected path.")
 			b, err := io.ReadAll(r.Body)
 			require.NoError(t, err, "Error reading body.")
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go
index 85ef1839a1..bb348aba7f 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/main.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go
@@ -83,9 +83,12 @@ var (
 	)
 	sentBatchDuration = prometheus.NewHistogramVec(
 		prometheus.HistogramOpts{
-			Name:    "sent_batch_duration_seconds",
-			Help:    "Duration of sample batch send calls to the remote storage.",
-			Buckets: prometheus.DefBuckets,
+			Name:                            "sent_batch_duration_seconds",
+			Help:                            "Duration of sample batch send calls to the remote storage.",
+			Buckets:                         prometheus.DefBuckets,
+			NativeHistogramBucketFactor:     1.1,
+			NativeHistogramMaxBucketNumber:  100,
+			NativeHistogramMinResetDuration: 1 * time.Hour,
 		},
 		[]string{"remote"},
 	)
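The main.go hunk above opts sent_batch_duration_seconds into Prometheus native histograms while keeping the classic DefBuckets. As a minimal, self-contained sketch of what the three new client_golang options do (the metric name, help text, and label value below are hypothetical, not taken from the adapter):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// One HistogramOpts can describe both flavours: a scraper that
// negotiates the protobuf exposition format receives a sparse native
// histogram, while everyone else keeps getting the classic DefBuckets.
var exampleSendDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "example_send_duration_seconds", // hypothetical name
		Help:    "Duration of example send calls.",
		Buckets: prometheus.DefBuckets,
		// Consecutive native buckets grow by at most a factor of 1.1.
		NativeHistogramBucketFactor: 1.1,
		// Once more than 100 buckets are in use, widen the factor to merge them.
		NativeHistogramMaxBucketNumber: 100,
		// Allow a full reset of the bucket layout at most once per hour.
		NativeHistogramMinResetDuration: 1 * time.Hour,
	},
	[]string{"remote"},
)

func main() {
	prometheus.MustRegister(exampleSendDuration)

	begin := time.Now()
	// ... send a batch to the remote endpoint here ...
	exampleSendDuration.WithLabelValues("opentsdb").Observe(time.Since(begin).Seconds())
}

The change is additive on the exposition side, which is why the hunk only needs to touch the HistogramOpts literal.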
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
index 0fa7c5a4b7..abb1d0b7d3 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
@@ -105,7 +105,7 @@ func (c *Client) Write(samples model.Samples) error {
 	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
 	defer cancel()
 
-	req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer(buf))
+	req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewBuffer(buf))
 	if err != nil {
 		return err
 	}
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
index d96f9017af..99cef0b242 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
@@ -51,16 +51,16 @@ type TagValue model.LabelValue
 //
 // Examples:
 //
-// "foo-bar-42" -> "foo-bar-42"
+//	"foo-bar-42" -> "foo-bar-42"
 //
-// "foo_bar_42" -> "foo__bar__42"
+//	"foo_bar_42" -> "foo__bar__42"
 //
-// "http://example.org:8080" -> "http_.//example.org_.8080"
+//	"http://example.org:8080" -> "http_.//example.org_.8080"
 //
-// "Björn's email: bjoern@soundcloud.com" ->
-// "Bj_C3_B6rn_27s_20email_._20bjoern_40soundcloud.com"
+//	"Björn's email: bjoern@soundcloud.com" ->
+//	"Bj_C3_B6rn_27s_20email_._20bjoern_40soundcloud.com"
 //
-// "日" -> "_E6_97_A5"
+//	"日" -> "_E6_97_A5"
 func (tv TagValue) MarshalJSON() ([]byte, error) {
 	length := len(tv)
 	// Need at least two more bytes than in tv.
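The MarshalJSON doc comment in the hunk above specifies the OpenTSDB tag-value escaping scheme entirely by example. A small illustrative re-implementation may make the rules easier to see; the helper name escapeTagValue is ours, the two-digit hex padding is an assumption, and the real method additionally emits the surrounding JSON quotes:

package main

import (
	"fmt"
	"strings"
)

// escapeTagValue reproduces the documented examples: bytes OpenTSDB
// accepts pass through unchanged, '_' is doubled so the mapping stays
// reversible, ':' gets the short form "_.", and every other byte
// becomes '_' followed by its uppercase hex value.
func escapeTagValue(v string) string {
	var b strings.Builder
	for i := 0; i < len(v); i++ {
		c := v[i]
		switch {
		case c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z',
			c >= '0' && c <= '9', c == '-', c == '.', c == '/':
			b.WriteByte(c)
		case c == '_':
			b.WriteString("__")
		case c == ':':
			b.WriteString("_.")
		default:
			fmt.Fprintf(&b, "_%02X", c) // e.g. each UTF-8 byte of "日"
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapeTagValue("foo_bar_42"))              // foo__bar__42
	fmt.Println(escapeTagValue("http://example.org:8080")) // http_.//example.org_.8080
	fmt.Println(escapeTagValue("日"))                       // _E6_97_A5
}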
diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet
index 3efb0f27d1..563daab801 100644
--- a/documentation/prometheus-mixin/alerts.libsonnet
+++ b/documentation/prometheus-mixin/alerts.libsonnet
@@ -34,6 +34,20 @@
           description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config,
         },
       },
+      {
+        alert: 'PrometheusKubernetesListWatchFailures',
+        expr: |||
+          increase(prometheus_sd_kubernetes_failures_total{%(prometheusSelector)s}[5m]) > 0
+        ||| % $._config,
+        'for': '15m',
+        labels: {
+          severity: 'warning',
+        },
+        annotations: {
+          summary: 'Requests in Kubernetes SD are failing.',
+          description: 'Kubernetes service discovery of Prometheus %(prometheusName)s is experiencing {{ printf "%%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.' % $._config,
+        },
+      },
       {
         alert: 'PrometheusNotificationQueueRunningFull',
         expr: |||
@@ -122,7 +136,7 @@
         alert: 'PrometheusNotIngestingSamples',
         expr: |||
           (
-            rate(prometheus_tsdb_head_samples_appended_total{%(prometheusSelector)s}[5m]) <= 0
+            sum without(type) (rate(prometheus_tsdb_head_samples_appended_total{%(prometheusSelector)s}[5m])) <= 0
           and
             (
               sum without(scrape_job) (prometheus_target_metadata_cache_entries{%(prometheusSelector)s}) > 0
diff --git a/documentation/prometheus-mixin/config.libsonnet b/documentation/prometheus-mixin/config.libsonnet
index ab9079a5e3..70d46a2212 100644
--- a/documentation/prometheus-mixin/config.libsonnet
+++ b/documentation/prometheus-mixin/config.libsonnet
@@ -44,5 +44,10 @@
       // The default refresh time for all dashboards, default to 60s
       refresh: '60s',
     },
+
+    // Opt-out of multi-cluster dashboards by overriding this.
+    showMultiCluster: true,
+    // The cluster label to infer the cluster name from.
+    clusterLabel: 'cluster',
   },
 }
diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet
index d99dac35d1..2bdd168cc9 100644
--- a/documentation/prometheus-mixin/dashboards.libsonnet
+++ b/documentation/prometheus-mixin/dashboards.libsonnet
@@ -10,19 +10,32 @@ local template = grafana.template;
 {
   grafanaDashboards+:: {
     'prometheus.json':
-      g.dashboard(
+      local showMultiCluster = $._config.showMultiCluster;
+      local dashboard = g.dashboard(
         '%(prefix)sOverview' % $._config.grafanaPrometheus
-      )
-      .addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
-      .addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance')
+      );
+      local templatedDashboard = if showMultiCluster then
+        dashboard
+        .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel)
+        .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
+        .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
+      else
+        dashboard
+        .addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
+        .addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance');
+      templatedDashboard
       .addRow(
         g.row('Prometheus Stats')
         .addPanel(
           g.panel('Prometheus Stats') +
-          g.tablePanel([
+          g.tablePanel(if showMultiCluster then [
+            'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
+            'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
+          ] else [
             'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
             'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
           ], {
+            cluster: { alias: if showMultiCluster then 'Cluster' else '' },
             job: { alias: 'Job' },
             instance: { alias: 'Instance' },
             version: { alias: 'Version' },
@@ -35,12 +48,18 @@ local template = grafana.template;
       g.row('Discovery')
       .addPanel(
         g.panel('Target Sync') +
-        g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3', '{{scrape_job}}') +
+        g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
+        else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3',
+        if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'
+        else '{{scrape_job}}') +
        { yaxes: g.yaxes('ms') }
       )
       .addPanel(
         g.panel('Targets') +
-        g.queryPanel('sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})', 'Targets') +
+        g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
+        else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
+        if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}'
+        else 'Targets') +
         g.stack
       )
     )
@@ -48,18 +67,33 @@ local template = grafana.template;
       g.row('Retrieval')
      .addPanel(
         g.panel('Average Scrape Interval Duration') +
-        g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{interval}} configured') +
+        g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
+        else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3',
+        if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured'
+        else '{{interval}} configured') +
         { yaxes: g.yaxes('ms') }
       )
       .addPanel(
         g.panel('Scrape failures') +
-        g.queryPanel([
+        g.queryPanel(if showMultiCluster then [
+          'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
+          'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
+          'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
+          'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
+          'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
+        ] else [
          'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))',
          'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
          'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
          'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
          'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
-        ], [
+        ], if showMultiCluster then [
+          'exceeded body size limit: {{cluster}} {{job}} {{instance}}',
+          'exceeded sample limit: {{cluster}} {{job}} {{instance}}',
+          'duplicate timestamp: {{cluster}} {{job}} {{instance}}',
+          'out of bounds: {{cluster}} {{job}} {{instance}}',
+          'out of order: {{cluster}} {{job}} {{instance}}',
+        ] else [
          'exceeded body size limit: {{job}}',
          'exceeded sample limit: {{job}}',
          'duplicate timestamp: {{job}}',
@@ -70,7 +104,10 @@ local template = grafana.template;
       )
       .addPanel(
         g.panel('Appended Samples') +
-        g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])', '{{job}} {{instance}}') +
+        g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
+        else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])',
+        if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
+        else '{{job}} {{instance}}') +
         g.stack
       )
     )
@@ -78,12 +115,18 @@ local template = grafana.template;
       g.row('Storage')
      .addPanel(
        g.panel('Head Series') +
-        g.queryPanel('prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}', '{{job}} {{instance}} head series') +
+        g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
+        else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}',
+        if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series'
+        else '{{job}} {{instance}} head series') +
         g.stack
      )
      .addPanel(
        g.panel('Head Chunks') +
-        g.queryPanel('prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}', '{{job}} {{instance}} head chunks') +
+        g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
+        else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}',
+        if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks'
+        else '{{job}} {{instance}} head chunks') +
         g.stack
      )
    )
@@ -91,12 +134,18 @@ local template = grafana.template;
      g.row('Query')
      .addPanel(
        g.panel('Query Rate') +
-        g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{job}} {{instance}}') +
+        g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
+        else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])',
+        if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
+        else '{{job}} {{instance}}') +
         g.stack,
      )
      .addPanel(
        g.panel('Stage Duration') +
-        g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') +
+        g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
+        else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3',
+        if showMultiCluster then '{{slice}}'
+        else '{{slice}}') +
         { yaxes: g.yaxes('ms') } +
         g.stack,
      )
@@ -117,7 +166,7 @@
          (
            prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}
          -
-            ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"} != 0)
+            ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0)
          )
        |||,
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
@@ -134,7 +183,7 @@
          clamp_min(
            rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
          -
-            ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
+            ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])
          , 0)
        |||,
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
@@ -151,9 +200,9 @@
          rate(
            prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m])
          -
-            ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]))
+            ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
          -
-            (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance"}[5m]))
+            (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
        |||,
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
      ));
@@ -166,7 +215,7 @@
        min_span=6,
      )
      .addTarget(prometheus.target(
-        'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}',
+        'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
      ));
@@ -177,7 +226,7 @@
        span=4,
      )
      .addTarget(prometheus.target(
-        'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance"}',
+        'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
      ));
@@ -188,7 +237,7 @@
        span=4,
      )
      .addTarget(prometheus.target(
-        'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance"}',
+        'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
      ));
@@ -199,7 +248,7 @@
        span=4,
      )
      .addTarget(prometheus.target(
-        'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance"}',
+        'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
      ));
@@ -210,7 +259,7 @@
        span=6,
      )
      .addTarget(prometheus.target(
-        'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance"}',
+        'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
      ));
@@ -222,7 +271,7 @@
        span=6,
      )
      .addTarget(prometheus.target(
-        'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance"}',
+        'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
      ));
@@ -257,7 +306,7 @@
        span=3,
      )
      .addTarget(prometheus.target(
-        'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance"}[5m])',
+        'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster,
instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -268,7 +317,7 @@ local template = grafana.template; span=3, ) .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance"}[5m])', + 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -279,7 +328,7 @@ local template = grafana.template; span=3, ) .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance"}[5m])', + 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); @@ -290,7 +339,7 @@ local template = grafana.template; span=3, ) .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance"}[5m])', + 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' )); diff --git a/go.mod b/go.mod index 3e97cdda0d..bc323f29b2 100644 --- a/go.mod +++ b/go.mod @@ -1,160 +1,156 @@ module github.com/prometheus/prometheus -go 1.19 +go 1.22.0 + +toolchain go1.23.0 require ( - github.com/Azure/azure-sdk-for-go v65.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 - github.com/Azure/go-autorest/autorest v0.11.29 - github.com/Azure/go-autorest/autorest/adal v0.9.23 - github.com/alecthomas/kingpin/v2 v2.3.2 - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.302 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 + github.com/Code-Hex/go-generics-cache v1.5.1 + github.com/KimMachineGun/automemlimit v0.6.1 + github.com/alecthomas/kingpin/v2 v2.4.0 + github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 + github.com/aws/aws-sdk-go v1.55.5 + github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 + github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.99.0 - github.com/docker/docker v24.0.4+incompatible + github.com/digitalocean/godo v1.122.0 + github.com/docker/docker v27.2.0+incompatible github.com/edsrzf/mmap-go v1.1.0 - github.com/envoyproxy/go-control-plane v0.11.1 - github.com/envoyproxy/protoc-gen-validate v1.0.2 - github.com/fsnotify/fsnotify v1.6.0 + github.com/envoyproxy/go-control-plane v0.13.0 + github.com/envoyproxy/protoc-gen-validate v1.1.0 + github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb + 
github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-openapi/strfmt v0.21.7 - github.com/go-zookeeper/zk v1.0.3 + github.com/go-openapi/strfmt v0.23.0 + github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 - github.com/gophercloud/gophercloud v1.5.0 - github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd + github.com/google/go-cmp v0.6.0 + github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da + github.com/google/uuid v1.6.0 + github.com/gophercloud/gophercloud v1.14.0 + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.22.0 - github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e - github.com/hetznercloud/hcloud-go/v2 v2.0.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.8 + github.com/hashicorp/consul/api v1.29.4 + github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 + github.com/hetznercloud/hcloud-go/v2 v2.13.1 + github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.16.7 + github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.19.0 - github.com/miekg/dns v1.1.55 + github.com/linode/linodego v1.41.0 + github.com/miekg/dns v1.1.62 + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f + github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 - github.com/ovh/go-ovh v1.4.1 - github.com/pkg/errors v0.9.1 - github.com/prometheus/alertmanager v0.25.0 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.4.0 - github.com/prometheus/common v0.44.0 + github.com/ovh/go-ovh v1.6.0 + github.com/prometheus/alertmanager v0.27.0 + github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.59.1 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.10.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 + github.com/prometheus/exporter-toolkit v0.12.0 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 - go.opentelemetry.io/collector/semconv v0.81.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 - go.opentelemetry.io/otel v1.16.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 - go.opentelemetry.io/otel/sdk v1.16.0 - go.opentelemetry.io/otel/trace v1.16.0 + go.opentelemetry.io/collector/pdata v1.16.0 + go.opentelemetry.io/collector/semconv v0.110.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 + 
go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/atomic v1.11.0 - go.uber.org/automaxprocs v1.5.2 - go.uber.org/goleak v1.2.1 + go.uber.org/automaxprocs v1.5.3 + go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/net v0.12.0 - golang.org/x/oauth2 v0.10.0 - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.10.0 - golang.org/x/time v0.3.0 - golang.org/x/tools v0.11.0 - google.golang.org/api v0.132.0 - google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 - google.golang.org/grpc v1.56.2 - google.golang.org/protobuf v1.31.0 + golang.org/x/oauth2 v0.23.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.25.0 + golang.org/x/text v0.18.0 + golang.org/x/time v0.6.0 + golang.org/x/tools v0.24.0 + google.golang.org/api v0.199.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc v1.67.0 + google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.27.3 - k8s.io/apimachinery v0.27.3 - k8s.io/client-go v0.27.3 + k8s.io/api v0.31.0 + k8s.io/apimachinery v0.31.0 + k8s.io/client-go v0.31.0 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.100.1 -) - -require ( - cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/google/s2a-go v0.1.4 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect + k8s.io/klog/v2 v2.130.1 ) require ( - cloud.google.com/go/compute v1.22.0 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect + cloud.google.com/go/auth v0.9.5 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cilium/ebpf v0.11.0 // indirect + github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - 
github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/distribution/reference v0.5.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.10.2 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.4 // indirect - github.com/go-openapi/errors v0.20.4 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/loads v0.21.2 // indirect - github.com/go-openapi/spec v0.20.9 // indirect - github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-openapi/validate v0.22.1 // indirect - github.com/go-resty/resty/v2 v2.7.0 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/go-openapi/analysis v0.22.2 // indirect + github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/loads v0.21.5 // indirect + github.com/go-openapi/spec v0.20.14 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-openapi/validate v0.23.0 // indirect + github.com/go-resty/resty/v2 v2.13.1 // indirect + github.com/godbus/dbus/v5 v5.0.4 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 - github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-retryablehttp v0.7.4 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect 
github.com/hashicorp/serf v0.10.1 // indirect @@ -163,46 +159,57 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mdlayher/vsock v1.2.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.11.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.mongodb.org/mongo-driver v1.12.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 - golang.org/x/mod v0.12.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - google.golang.org/appengine v1.6.7 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/term v0.24.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect - k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect - k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + 
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0 - k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0 + k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.5.0 ) // Exclude linodego v1.0.0 as it is no longer published on github. diff --git a/go.sum b/go.sum index f5661fc9e4..f2a80912f7 100644 --- a/go.sum +++ b/go.sum @@ -12,16 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= +cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y= -cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -34,60 +36,47 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod 
h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= -github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= 
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= +github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8= +github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= -github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -98,48 +87,47 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk= -github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= 
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= +github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod 
h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -147,7 +135,6 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -156,18 +143,18 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= -github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= +github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= +github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI= -github.com/docker/docker v24.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= +github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -175,35 +162,35 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= -github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color 
v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= +github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -223,78 +210,37 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod 
h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
-github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
-github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
-github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
-github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
-github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
-github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
-github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
-github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
-github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
-github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
-github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
+github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
+github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo=
+github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
+github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
+github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
+github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0=
+github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8=
+github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do=
+github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
+github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
+github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
+github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
-github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
-github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
+github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -302,12 +248,11 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -335,19 +280,17 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
-github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -359,8 +302,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -375,24 +319,23 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA=
-github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
-github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
-github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
-github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo=
-github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
+github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
+github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -400,20 +343,23 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE=
-github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg=
+github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
+github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
+github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
+github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU=
+github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
+github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -423,32 +369,35 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
-github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I=
+github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -460,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs=
-github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g=
-github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -474,15 +423,15 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0=
-github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
-github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
+github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
+github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
+github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
+github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -502,24 +451,19 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -528,16 +472,11 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw=
-github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ=
+github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
+github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -552,18 +491,21 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
+github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
+github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
+github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
-github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
+github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -571,14 +513,15 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -588,7 +531,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -603,7 +545,8 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM=
+github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
@@ -614,14 +557,18 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -630,32 +577,36 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM=
-github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M=
+github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
+github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
-github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg=
-github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I=
+github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -663,16 +614,16 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
-github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
@@ -680,102 +631,89 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
+github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
-github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8=
-github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY=
+github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo=
+github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk=
-github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU=
+github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
+github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs=
github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k=
-github.com/simonpasquier/klog-gokit/v3 v3.3.0 h1:HMzH999kO5gEgJTaWWO+xjncW5oycspcsBnjn9b853Q=
-github.com/simonpasquier/klog-gokit/v3 v3.3.0/go.mod h1:uSbnWC3T7kt1dQyY9sjv0Ao1SehMAJdVnUNSKhjaDsg=
+github.com/simonpasquier/klog-gokit/v3 v3.5.0 h1:ewnk+ickph0hkQFgdI4pffKIbruAxxWcg0Fe/vQmLOM=
+github.com/simonpasquier/klog-gokit/v3 v3.5.0/go.mod h1:S9flvRzzpaYLYtXI2w8jf9R/IU/Cy14NrbvDUevNP1E=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
-github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -783,11 +721,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
-go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
-go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE=
-go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -797,39 +732,36 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY=
-go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4=
-go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw=
-go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8=
-go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
-go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw=
-go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
-go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
-go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
-go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
-go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
-go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto=
+go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4=
+go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk=
+go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
+go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
+go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
+go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
+go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
+go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
+go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
+go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
+go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
-go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -840,22 +772,18 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -866,8 +794,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
-golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -889,8 +817,9 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
+golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -928,31 +857,28 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
-golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -961,8 +887,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -974,13 +901,10 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1013,51 +937,54 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod 
h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1067,13 +994,9 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1111,8 +1034,9 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1132,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= -google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1141,8 +1065,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1172,13 +1094,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= -google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM= -google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE= -google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1197,11 +1116,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1213,19 +1129,18 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1246,9 +1161,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1262,24 +1174,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= -k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= -k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= -k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= -k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= -k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= -k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI= -k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ= -k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= -k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go index 2e39cf6892..2c28b17257 100644 --- a/model/exemplar/exemplar.go +++ b/model/exemplar/exemplar.go @@ -15,7 +15,9 @@ package exemplar import "github.com/prometheus/prometheus/model/labels" -// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters +// ExemplarMaxLabelSetLength is 
defined by OpenMetrics: "The combined length of +// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 +// UTF-8 characters." // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars const ExemplarMaxLabelSetLength = 128 @@ -48,3 +50,18 @@ func (e Exemplar) Equals(e2 Exemplar) bool { return e.Value == e2.Value } + +// Compare first timestamps, then values, then labels. +func Compare(a, b Exemplar) int { + if a.Ts < b.Ts { + return -1 + } else if a.Ts > b.Ts { + return 1 + } + if a.Value < b.Value { + return -1 + } else if a.Value > b.Value { + return 1 + } + return labels.Compare(a.Labels, b.Labels) +} diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index d3f013935c..b0deb6cc4d 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -15,6 +15,7 @@ package histogram import ( "fmt" + "math" "strings" ) @@ -29,11 +30,12 @@ import ( type FloatHistogram struct { // Counter reset information. CounterResetHint CounterResetHint - // Currently valid schema numbers are -4 <= n <= 8. They are all for - // base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. Or - // in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -48,40 +50,110 @@ type FloatHistogram struct { // Observation counts in buckets. Each represents an absolute count and // must be zero or positive. PositiveBuckets, NegativeBuckets []float64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 +} + +func (h *FloatHistogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) } // Copy returns a deep copy of the Histogram. 
func (h *FloatHistogram) Copy() *FloatHistogram { - c := *h + c := FloatHistogram{ + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + Count: h.Count, + Sum: h.Sum, + } + + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } - if h.PositiveSpans != nil { + if len(h.PositiveSpans) != 0 { c.PositiveSpans = make([]Span, len(h.PositiveSpans)) copy(c.PositiveSpans, h.PositiveSpans) } - if h.NegativeSpans != nil { - c.NegativeSpans = make([]Span, len(h.NegativeSpans)) - copy(c.NegativeSpans, h.NegativeSpans) - } - if h.PositiveBuckets != nil { + if len(h.PositiveBuckets) != 0 { c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) copy(c.PositiveBuckets, h.PositiveBuckets) } - if h.NegativeBuckets != nil { - c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) - copy(c.NegativeBuckets, h.NegativeBuckets) - } return &c } +// CopyTo makes a deep copy into the given FloatHistogram. +// The destination object has to be a non-nil pointer. +func (h *FloatHistogram) CopyTo(to *FloatHistogram) { + to.CounterResetHint = h.CounterResetHint + to.Schema = h.Schema + to.Count = h.Count + to.Sum = h.Sum + + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) + copy(to.PositiveSpans, h.PositiveSpans) + + to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) + copy(to.PositiveBuckets, h.PositiveBuckets) +} + // CopyToSchema works like Copy, but the returned deep copy has the provided // target schema, which must be ≤ the original schema (i.e. it must have a lower -// resolution). +// resolution). This method panics if a custom buckets schema is used in the +// receiving FloatHistogram or as the provided targetSchema. func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { if targetSchema == h.Schema { // Fast path. 
return h.Copy() } + if h.UsesCustomBuckets() { + panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema)) + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } if targetSchema > h.Schema { panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) } @@ -93,8 +165,8 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { Sum: h.Sum, } - c.PositiveSpans, c.PositiveBuckets = mergeToSchema(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema) - c.NegativeSpans, c.NegativeBuckets = mergeToSchema(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema) + c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, false) + c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, false) return &c } @@ -130,14 +202,81 @@ func (h *FloatHistogram) String() string { return sb.String() } -// ZeroBucket returns the zero bucket. +// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing +// framework as well as in promtool rules unit tests. +// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series +func (h *FloatHistogram) TestExpression() string { + var res []string + m := h.Copy() + + m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1. + + if m.Schema != 0 { + res = append(res, fmt.Sprintf("schema:%d", m.Schema)) + } + if m.Count != 0 { + res = append(res, fmt.Sprintf("count:%g", m.Count)) + } + if m.Sum != 0 { + res = append(res, fmt.Sprintf("sum:%g", m.Sum)) + } + if m.ZeroCount != 0 { + res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount)) + } + if m.ZeroThreshold != 0 { + res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) + } + if m.UsesCustomBuckets() { + res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) + } + + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. + case CounterReset: + res = append(res, "counter_reset_hint:reset") + case NotCounterReset: + res = append(res, "counter_reset_hint:not_reset") + case GaugeType: + res = append(res, "counter_reset_hint:gauge") + } + + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { + if len(spans) > 1 { + panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) + } + for _, span := range spans { + if span.Offset != 0 { + res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset)) + } + } + + var bucketStr []string + for _, bucket := range buckets { + bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket)) + } + if len(bucketStr) > 0 { + res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " "))) + } + return res + } + res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans) + res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans) + return "{{" + strings.Join(res, " ") + "}}" +} + +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. 
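Editorial aside, not part of the patch: TestExpression emits the {{...}} series syntax used by the PromQL testing framework and promtool unit tests. A small sketch of what it produces for a hypothetical two-bucket histogram, derived from the field checks above.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	fh := &histogram.FloatHistogram{
		Count:           3,
		Sum:             5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	// Zero-valued fields (schema, z_bucket, offsets, hints) are omitted entirely.
	fmt.Println(fh.TestExpression()) // {{count:3 sum:5 buckets:[1 2]}}
}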
func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } return Bucket[float64]{ Lower: -h.ZeroThreshold, Upper: h.ZeroThreshold, LowerInclusive: true, UpperInclusive: true, Count: h.ZeroCount, + // Index is irrelevant for the zero bucket. } } @@ -159,7 +298,7 @@ func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { return h } -// Div works like Scale but divides instead of multiplies. +// Div works like Mul but divides instead of multiplies. // When dividing by 0, everything will be set to Inf. func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar @@ -181,14 +320,20 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { // resulting histogram might have buckets with a population of zero or directly // adjacent spans (offset=0). To normalize those, call the Compact method. // -// The method reconciles differences in the zero threshold and in the schema, -// but the schema of the other histogram must be ≥ the schema of the receiving -// histogram (i.e. must have an equal or higher resolution). This means that the -// schema of the receiving histogram won't change. Its zero threshold, however, -// will change if needed. The other histogram will not be modified in any case. +// The method reconciles differences in the zero threshold and in the schema, and +// changes them if needed. The other histogram will not be modified in any case. +// Adding is currently only supported between 2 exponential histograms, or between +// 2 custom buckets histograms with the exact same custom bounds. // // This method returns a pointer to the receiving histogram for convenience. -func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { +func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + switch { case other.CounterResetHint == h.CounterResetHint: // Adding apples to apples, all good. No need to change anything. @@ -208,191 +353,181 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { default: // All other cases shouldn't actually happen. // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. h.CounterResetHint = UnknownCounterReset // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } - otherZeroCount := h.reconcileZeroBuckets(other) - h.ZeroCount += otherZeroCount + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + } h.Count += other.Count h.Sum += other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. 
- var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil } - return h + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) + } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + + return h, nil } // Sub works like Add but subtracts the other histogram. -func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { - otherZeroCount := h.reconcileZeroBuckets(other) - h.ZeroCount -= otherZeroCount +func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + } h.Count -= other.Count h.Sum -= other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. 
- var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil } - return h + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) + } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + + return h, nil } // Equals returns true if the given float histogram matches exactly. // Exact match is when there are no new buckets (even empty) and no missing buckets, // and all the bucket values match. Spans can have different empty length spans in between, // but they must represent the same bucket layout to match. +// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns +// because this method is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. 
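Editorial aside, not part of the patch: Add and Sub now report incompatibilities instead of silently merging, so call sites gain an error check. A minimal sketch with invented bucket layouts.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	a := &histogram.FloatHistogram{
		Schema: 0, Count: 2, Sum: 3,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{2},
	}
	b := &histogram.FloatHistogram{
		Schema: 0, Count: 1, Sum: 1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	// Add mutates and returns the receiver; mixing exponential and custom
	// buckets schemas (or mismatched custom bounds) yields a non-nil error
	// such as ErrHistogramsIncompatibleSchema instead of a bad merge.
	sum, err := a.Add(b)
	if err != nil {
		fmt.Println("cannot add:", err)
		return
	}
	fmt.Println(sum.Count, sum.Sum) // 3 4
}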
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { if h2 == nil { return false } - if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + if h.Schema != h2.Schema || + math.Float64bits(h.Count) != math.Float64bits(h2.Count) || + math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } - if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + if h.UsesCustomBuckets() { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || + math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) { return false } + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { return false } + if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } - if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { return false } - if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { return false } return true } -// addBucket takes the "coordinates" of the last bucket that was handled and -// adds the provided bucket after it. If a corresponding bucket exists, the -// count is added. If not, the bucket is inserted. The updated slices and the -// coordinates of the inserted or added-to bucket are returned. -func addBucket( - b Bucket[float64], - spans []Span, buckets []float64, - iSpan, iBucket int, - iInSpan, index int32, -) ( - newSpans []Span, newBuckets []float64, - newISpan, newIBucket int, newIInSpan int32, -) { - if iSpan == -1 { - // First add, check if it is before all spans. - if len(spans) == 0 || spans[0].Offset > b.Index { - // Add bucket before all others. - buckets = append(buckets, 0) - copy(buckets[1:], buckets) - buckets[0] = b.Count - if len(spans) > 0 && spans[0].Offset == b.Index+1 { - spans[0].Length++ - spans[0].Offset-- - return spans, buckets, 0, 0, 0 - } - spans = append(spans, Span{}) - copy(spans[1:], spans) - spans[0] = Span{Offset: b.Index, Length: 1} - if len(spans) > 1 { - // Convert the absolute offset in the formerly - // first span to a relative offset. - spans[1].Offset -= b.Index + 1 - } - return spans, buckets, 0, 0, 0 - } - if spans[0].Offset == b.Index { - // Just add to first bucket. - buckets[0] += b.Count - return spans, buckets, 0, 0, 0 - } - // We are behind the first bucket, so set everything to the - // first bucket and continue normally. - iSpan, iBucket, iInSpan = 0, 0, 0 - index = spans[0].Offset - } - deltaIndex := b.Index - index - for { - remainingInSpan := int32(spans[iSpan].Length) - iInSpan - if deltaIndex < remainingInSpan { - // Bucket is in current span. - iBucket += int(deltaIndex) - iInSpan += deltaIndex - buckets[iBucket] += b.Count - return spans, buckets, iSpan, iBucket, iInSpan - } - deltaIndex -= remainingInSpan - iBucket += int(remainingInSpan) - iSpan++ - if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { - // Bucket is in gap behind previous span (or there are no further spans). - buckets = append(buckets, 0) - copy(buckets[iBucket+1:], buckets[iBucket:]) - buckets[iBucket] = b.Count - if deltaIndex == 0 { - // Directly after previous span, extend previous span. 
- if iSpan < len(spans) { - spans[iSpan].Offset-- - } - iSpan-- - iInSpan = int32(spans[iSpan].Length) - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { - // Directly before next span, extend next span. - iInSpan = 0 - spans[iSpan].Offset-- - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - // No next span, or next span is not directly adjacent to new bucket. - // Add new span. - iInSpan = 0 - if iSpan < len(spans) { - spans[iSpan].Offset -= deltaIndex + 1 - } - spans = append(spans, Span{}) - copy(spans[iSpan+1:], spans[iSpan:]) - spans[iSpan] = Span{Length: 1, Offset: deltaIndex} - return spans, buckets, iSpan, iBucket, iInSpan - } - // Try start of next span. - deltaIndex -= spans[iSpan].Offset - iInSpan = 0 - } +// Size returns the total size of the FloatHistogram, which includes the size of the pointer +// to FloatHistogram, all its fields, and all elements contained in slices. +// NOTE: this is only valid for 64 bit architectures. +func (h *FloatHistogram) Size() int { + // Size of each slice separately. + posSpanSize := len(h.PositiveSpans) * 8 // 8 bytes (int32 + uint32). + negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). + posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). + negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). + customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64). + + // Total size of the struct. + + // fh is 8 bytes. + // fh.CounterResetHint is 4 bytes (1 byte bool + 3 bytes padding). + // fh.Schema is 4 bytes. + // fh.ZeroThreshold is 8 bytes. + // fh.ZeroCount is 8 bytes. + // fh.Count is 8 bytes. + // fh.Sum is 8 bytes. + // fh.PositiveSpans is 24 bytes. + // fh.NegativeSpans is 24 bytes. + // fh.PositiveBuckets is 24 bytes. + // fh.NegativeBuckets is 24 bytes. + // fh.CustomValues is 24 bytes. + structSize := 168 + + return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize } // Compact eliminates empty buckets at the beginning and end of each span, then @@ -472,11 +607,17 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { // is a counter reset or not. // We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still // allows the user to apply functions to gauge histograms that are only meant for counter histograms. - // In this case, we treat the gauge histograms as a counter histograms - // (and we plan to return a warning about it to the user). + // In this case, we treat the gauge histograms as counter histograms. A warning should be returned + // to the user in this case. if h.Count < previous.Count { return true } + if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) { + // Mark that something has changed or that the application has been restarted. However, this does + // not matter so much since the change in schema will be handled directly in the chunks and PromQL + // functions. 
+ return true + } if h.Schema > previous.Schema { return true } @@ -495,25 +636,25 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { } currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema) prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema) - if detectReset(currIt, prevIt) { + if detectReset(&currIt, &prevIt) { return true } currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema) prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema) - return detectReset(currIt, prevIt) + return detectReset(&currIt, &prevIt) } -func detectReset(currIt, prevIt BucketIterator[float64]) bool { +func detectReset(currIt, prevIt *floatBucketIterator) bool { if !prevIt.Next() { return false // If no buckets in previous histogram, nothing can be reset. } - prevBucket := prevIt.At() + prevBucket := prevIt.strippedAt() if !currIt.Next() { // No bucket in current, but at least one in previous // histogram. Check if any of those are non-zero, in which case // this is a reset. for { - if prevBucket.Count != 0 { + if prevBucket.count != 0 { return true } if !prevIt.Next() { @@ -521,18 +662,18 @@ func detectReset(currIt, prevIt BucketIterator[float64]) bool { } } } - currBucket := currIt.At() + currBucket := currIt.strippedAt() for { // Forward currIt until we find the bucket corresponding to prevBucket. - for currBucket.Index < prevBucket.Index { + for currBucket.index < prevBucket.index { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the - // current one does not have. Unlass all + // current one does not have. Unless all // remaining buckets in the previous histogram // are unpopulated, this is a reset. for { - if prevBucket.Count != 0 { + if prevBucket.count != 0 { return true } if !prevIt.Next() { @@ -540,18 +681,18 @@ func detectReset(currIt, prevIt BucketIterator[float64]) bool { } } } - currBucket = currIt.At() + currBucket = currIt.strippedAt() } - if currBucket.Index > prevBucket.Index { + if currBucket.index > prevBucket.index { // Previous histogram has a bucket the current one does // not have. If it's populated, it's a reset. - if prevBucket.Count != 0 { + if prevBucket.count != 0 { return true } } else { // We have reached corresponding buckets in both iterators. // We can finally compare the counts. - if currBucket.Count < prevBucket.Count { + if currBucket.count < prevBucket.count { return true } } @@ -559,35 +700,39 @@ func detectReset(currIt, prevIt BucketIterator[float64]) bool { // Reached end of prevIt without finding offending buckets. return false } - prevBucket = prevIt.At() + prevBucket = prevIt.strippedAt() } } // PositiveBucketIterator returns a BucketIterator to iterate over all positive // buckets in ascending order (starting next to the zero bucket and going up). func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] { - return h.floatBucketIterator(true, 0, h.Schema) + it := h.floatBucketIterator(true, 0, h.Schema) + return &it } // NegativeBucketIterator returns a BucketIterator to iterate over all negative // buckets in descending order (starting next to the zero bucket and going // down). 
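Editorial aside, not part of the patch: the constant 168 in the new Size method is the 160-byte struct layout on 64-bit platforms plus the 8-byte pointer that Size deliberately counts, per the field-by-field breakdown in its comments. A quick sanity-check sketch.

package main

import (
	"fmt"
	"unsafe"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	var fh histogram.FloatHistogram
	// 1-byte hint (+3 bytes padding) + 4-byte schema + four 8-byte float64
	// fields + five 24-byte slice headers = 160; Size adds 8 for the pointer.
	fmt.Println(unsafe.Sizeof(fh), unsafe.Sizeof(fh)+8) // 160 168
	// Each populated span or bucket element then contributes another 8 bytes.
	fmt.Println((&fh).Size()) // 168 for an all-nil histogram
}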
func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { - return h.floatBucketIterator(false, 0, h.Schema) + it := h.floatBucketIterator(false, 0, h.Schema) + return &it } // PositiveReverseBucketIterator returns a BucketIterator to iterate over all // positive buckets in descending order (starting at the highest bucket and // going down towards the zero bucket). func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { - return newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) + return &it } // NegativeReverseBucketIterator returns a BucketIterator to iterate over all // negative buckets in ascending order (starting at the lowest bucket and going // up towards the zero bucket). func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { - return newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) + return &it } // AllBucketIterator returns a BucketIterator to iterate over all negative, @@ -598,8 +743,8 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: h.NegativeReverseBucketIterator(), - rightIter: h.PositiveBucketIterator(), + leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil), + rightIter: h.floatBucketIterator(true, 0, h.Schema), state: -1, } } @@ -612,12 +757,59 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: h.PositiveReverseBucketIterator(), - rightIter: h.NegativeBucketIterator(), + leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues), + rightIter: h.floatBucketIterator(false, 0, h.Schema), state: -1, } } +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. +// We do not check for h.Count being at least as large as the sum of the +// counts in the buckets because floating point precision issues can +// create false positives here. 
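Editor note: as a usage sketch of the `Validate` contract documented above and implemented just below. All field values here are hypothetical, and the import path assumes the standard Prometheus module layout:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           6,
		Sum:             42,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{4, 2},
		// One custom bound yields two buckets: [-Inf, 10] and (10, +Inf].
		CustomValues: []float64{10},
	}
	if err := h.Validate(); err != nil {
		// Setting a non-zero ZeroCount on this histogram, for example,
		// would fail with "custom buckets: must have zero count of 0".
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("valid")
}
```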
+func (h *FloatHistogram) Validate() error { + var nCount, pCount float64 + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomValues != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } + } + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) + if err != nil { + return fmt.Errorf("positive side: %w", err) + } + + return nil +} + // zeroCountForLargerThreshold returns what the histogram's zero count would be // if the ZeroThreshold had the provided larger (or equal) value. If the // provided value is less than the histogram's ZeroThreshold, the method panics. @@ -710,7 +902,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() { // reconcileZeroBuckets finds a zero bucket large enough to include the zero // buckets of both histograms (the receiving histogram and the other histogram) // with a zero threshold that is not within a populated bucket in either -// histogram. This method modifies the receiving histogram accourdingly, but +// histogram. This method modifies the receiving histogram accordingly, but // leaves the other histogram as is. Instead, it returns the zero count the // other histogram would have if it were modified. func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { @@ -734,31 +926,41 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { // If positive is true, the returned iterator iterates through the positive // buckets, otherwise through the negative buckets. // -// If absoluteStartValue is < the lowest absolute value of any upper bucket -// boundary, the iterator starts with the first bucket. Otherwise, it will skip -// all buckets with an absolute value of their upper boundary ≤ -// absoluteStartValue. +// Only for exponential schemas, if absoluteStartValue is < the lowest absolute +// value of any upper bucket boundary, the iterator starts with the first bucket. +// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤ +// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and +// no buckets are skipped. // // targetSchema must be ≤ the schema of FloatHistogram (and of course within the // legal values for schemas in general). The buckets are merged to match the -// targetSchema prior to iterating (without mutating FloatHistogram). +// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets +// schemas cannot be merged with other schemas. 
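Editor note: the constructor below enforces that restriction with panics — custom-buckets histograms can only be iterated at their own schema, and exponential histograms can only be merged down to a coarser exponential schema. A hedged sketch probing the same separation through the exported `ReduceResolution` (defined further down in this file), which applies an equivalent guard; assume this helper lives in the `histogram` package:

```go
func demoCustomBucketsGuard() {
	h := &FloatHistogram{
		Schema:          CustomBucketsSchema,
		PositiveSpans:   []Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
		CustomValues:    []float64{10},
	}
	defer func() {
		// Recovers: "cannot reduce resolution when there are custom buckets".
		fmt.Println(recover())
	}()
	h.ReduceResolution(0)
}
```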
func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, -) *floatBucketIterator { +) floatBucketIterator { + if h.UsesCustomBuckets() && targetSchema != h.Schema { + panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema")) + } + if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) { + panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema")) + } if targetSchema > h.Schema { panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) } - i := &floatBucketIterator{ + i := floatBucketIterator{ baseBucketIterator: baseBucketIterator[float64, float64]{ schema: h.Schema, positive: positive, }, - targetSchema: targetSchema, - absoluteStartValue: absoluteStartValue, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + boundReachedStartValue: absoluteStartValue == 0, } if positive { i.spans = h.PositiveSpans i.buckets = h.PositiveBuckets + i.customValues = h.CustomValues } else { i.spans = h.NegativeSpans i.buckets = h.NegativeBuckets @@ -766,16 +968,17 @@ func (h *FloatHistogram) floatBucketIterator( return i } -// reverseFloatbucketiterator is a low-level constructor for reverse bucket iterators. +// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. func newReverseFloatBucketIterator( - spans []Span, buckets []float64, schema int32, positive bool, -) *reverseFloatBucketIterator { - r := &reverseFloatBucketIterator{ + spans []Span, buckets []float64, schema int32, positive bool, customValues []float64, +) reverseFloatBucketIterator { + r := reverseFloatBucketIterator{ baseBucketIterator: baseBucketIterator[float64, float64]{ - schema: schema, - spans: spans, - buckets: buckets, - positive: positive, + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customValues: customValues, }, } @@ -798,6 +1001,8 @@ type floatBucketIterator struct { targetSchema int32 // targetSchema is the schema to merge to and must be ≤ schema. origIdx int32 // The bucket index within the original schema. absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value. + + boundReachedStartValue bool // Has getBound reached absoluteStartValue already? } func (i *floatBucketIterator) At() Bucket[float64] { @@ -810,74 +1015,92 @@ func (i *floatBucketIterator) Next() bool { return false } - // Copy all of these into local variables so that we can forward to the - // next bucket and then roll back if needed. - origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan - span := i.spans[spansIdx] - firstPass := true - i.currCount = 0 - -mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. - for { + if i.schema == i.targetSchema { + // Fast path for the common case. + span := i.spans[i.spansIdx] if i.bucketsIdx == 0 { // Seed origIdx for the first bucket. - origIdx = span.Offset + i.currIdx = span.Offset } else { - origIdx++ + i.currIdx++ } - for idxInSpan >= span.Length { + + for i.idxInSpan >= span.Length { // We have exhausted the current span and have to find a new // one. We even handle pathologic spans of length 0 here. 
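Editor note: the span walk continued below is easier to follow with the encoding spelled out — each `Span` contributes `Length` consecutive buckets starting `Offset` positions after the previous span ended. A standalone helper, written here only for illustration, that expands spans into absolute bucket indexes:

```go
// absoluteIndexes mirrors the walk done by the iterators: each span's
// Offset is relative to the index just past the previous span's last bucket.
func absoluteIndexes(spans []Span) []int32 {
	var out []int32
	var idx int32
	for _, s := range spans {
		idx += s.Offset
		for i := uint32(0); i < s.Length; i++ {
			out = append(out, idx)
			idx++
		}
	}
	return out
}
```

For example, `absoluteIndexes([]Span{{-2, 2}, {1, 3}})` yields `[-2, -1, 1, 2, 3]`: five indexes matching the five counts of a buckets slice such as `{1, 0, 3, 4, 7}` used in the tests below.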
- idxInSpan = 0 - spansIdx++ - if spansIdx >= len(i.spans) { - if firstPass { - return false - } - break mergeLoop + i.idxInSpan = 0 + i.spansIdx++ + if i.spansIdx >= len(i.spans) { + return false } - span = i.spans[spansIdx] - origIdx += span.Offset + span = i.spans[i.spansIdx] + i.currIdx += span.Offset } - currIdx := i.targetIdx(origIdx) - switch { - case firstPass: - i.currIdx = currIdx - firstPass = false - case currIdx != i.currIdx: - // Reached next bucket in targetSchema. - // Do not actually forward to the next bucket, but break out. - break mergeLoop - } - i.currCount += i.buckets[i.bucketsIdx] - idxInSpan++ + + i.currCount = i.buckets[i.bucketsIdx] + i.idxInSpan++ i.bucketsIdx++ - i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan - if i.schema == i.targetSchema { - // Don't need to test the next bucket for mergeability - // if we have no schema change anyway. - break mergeLoop + } else { + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed. + origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + + mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop + } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := targetIdx(origIdx, i.schema, i.targetSchema) + switch { + case firstPass: + i.currIdx = currIdx + firstPass = false + case currIdx != i.currIdx: + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. + break mergeLoop + } } } - // Skip buckets before absoluteStartValue. + + // Skip buckets before absoluteStartValue for exponential schemas. // TODO(beorn7): Maybe do something more efficient than this recursive call. - if getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue { return i.Next() } + i.boundReachedStartValue = true return true } -// targetIdx returns the bucket index within i.targetSchema for the given bucket -// index within i.schema. -func (i *floatBucketIterator) targetIdx(idx int32) int32 { - if i.schema == i.targetSchema { - // Fast path for the common case. The below would yield the same - // result, just with more effort. - return idx - } - return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 -} - type reverseFloatBucketIterator struct { baseBucketIterator[float64, float64] idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. 
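Editor note: in the slow path above, the free `targetIdx` helper (shown in context just below) maps a bucket index at the source schema to its index at the coarser target schema via `((idx - 1) >> (originSchema - targetSchema)) + 1`. A small worked example, assuming a merge from schema 2 down to schema 0:

```go
package main

import "fmt"

// Same formula as the targetIdx helper in this file, restated here only
// to illustrate the mapping.
func targetIdx(idx, originSchema, targetSchema int32) int32 {
	return ((idx - 1) >> (originSchema - targetSchema)) + 1
}

func main() {
	// 2^(2-0) = 4 adjacent schema-2 buckets collapse into one schema-0 bucket.
	for idx := int32(1); idx <= 8; idx++ {
		fmt.Printf("schema-2 bucket %d -> schema-0 bucket %d\n", idx, targetIdx(idx, 2, 0))
	}
	// Buckets 1..4 map to target bucket 1, buckets 5..8 to target bucket 2.
}
```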
@@ -904,8 +1127,9 @@ func (i *reverseFloatBucketIterator) Next() bool { } type allFloatBucketIterator struct { - h *FloatHistogram - leftIter, rightIter BucketIterator[float64] + h *FloatHistogram + leftIter reverseFloatBucketIterator + rightIter floatBucketIterator // -1 means we are iterating negative buckets. // 0 means it is time for the zero bucket. // 1 means we are iterating positive buckets. @@ -932,14 +1156,7 @@ func (i *allFloatBucketIterator) Next() bool { case 0: i.state = 1 if i.h.ZeroCount > 0 { - i.currBucket = Bucket[float64]{ - Lower: -i.h.ZeroThreshold, - Upper: i.h.ZeroThreshold, - LowerInclusive: true, - UpperInclusive: true, - Count: i.h.ZeroCount, - // Index is irrelevant for the zero bucket. - } + i.currBucket = i.h.ZeroBucket() return true } return i.Next() @@ -971,65 +1188,164 @@ func targetIdx(idx, originSchema, targetSchema int32) int32 { return ((idx - 1) >> (originSchema - targetSchema)) + 1 } -// mergeToSchema is used to merge a FloatHistogram's Spans and Buckets (no matter if -// positive or negative) from the original schema to the target schema. -// The target schema must be smaller than the original schema. -func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, targetSchema int32) ([]Span, []float64) { +// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, +// creating missing buckets in spansA/bucketsA as needed. +// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, +// although spansA/bucketsA might get modified by this function). +// All buckets must use the same provided schema. +// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. +// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. +func addBuckets( + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, + spansB []Span, bucketsB []float64, +) ([]Span, []float64) { var ( - targetSpans []Span // The spans in the target schema. - targetBuckets []float64 // The buckets in the target schema. - bucketIdx int32 // The index of bucket in the origin schema. - lastTargetBucketIdx int32 // The index of the last added target bucket. - origBucketIdx int // The position of a bucket in originBuckets slice. + iSpan = -1 + iBucket = -1 + iInSpan int32 + indexA int32 + indexB int32 + bIdxB int + bucketB float64 + deltaIndex int32 + lowerThanThreshold = true ) - for _, span := range originSpans { - // Determine the index of the first bucket in this span. - bucketIdx += span.Offset - for j := 0; j < int(span.Length); j++ { - // Determine the index of the bucket in the target schema from the index in the original schema. - targetBucketIdx := targetIdx(bucketIdx, originSchema, targetSchema) + for _, spanB := range spansB { + indexB += spanB.Offset + for j := 0; j < int(spanB.Length); j++ { + if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold { + goto nextLoop + } + lowerThanThreshold = false - switch { - case len(targetSpans) == 0: - // This is the first span in the targetSpans. - span := Span{ - Offset: targetBucketIdx, - Length: 1, + bucketB = bucketsB[bIdxB] + if negative { + bucketB *= -1 + } + + if iSpan == -1 { + if len(spansA) == 0 || spansA[0].Offset > indexB { + // Add bucket before all others. 
+ bucketsA = append(bucketsA, 0) + copy(bucketsA[1:], bucketsA) + bucketsA[0] = bucketB + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { + spansA[0].Length++ + spansA[0].Offset-- + goto nextLoop + } + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop + } else if spansA[0].Offset == indexB { + // Just add to first bucket. + bucketsA[0] += bucketB + goto nextLoop + } + iSpan, iBucket, iInSpan = 0, 0, 0 + indexA = spansA[0].Offset + } + deltaIndex = indexB - indexA + for { + remainingInSpan := int32(spansA[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + bucketsA[iBucket] += bucketB + break } - targetSpans = append(targetSpans, span) - targetBuckets = append(targetBuckets, originBuckets[0]) - lastTargetBucketIdx = targetBucketIdx - - case lastTargetBucketIdx == targetBucketIdx: - // The current bucket has to be merged into the same target bucket as the previous bucket. - targetBuckets[len(targetBuckets)-1] += originBuckets[origBucketIdx] - - case (lastTargetBucketIdx + 1) == targetBucketIdx: - // The current bucket has to go into a new target bucket, - // and that bucket is next to the previous target bucket, - // so we add it to the current target span. - targetSpans[len(targetSpans)-1].Length++ - targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) - lastTargetBucketIdx++ - - case (lastTargetBucketIdx + 1) < targetBucketIdx: - // The current bucket has to go into a new target bucket, - // and that bucket is separated by a gap from the previous target bucket, - // so we need to add a new target span. - span := Span{ - Offset: targetBucketIdx - lastTargetBucketIdx - 1, - Length: 1, + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { + spansA[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. + iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop + } + } else { + // Try start of next span. 
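Editor note: while the loop above resolves where each incoming bucket lands (its final else-branch continues just below), here is a sketch of the overall contract stated in the `addBuckets` doc comment, with hypothetical inputs. Note that the returned slices must replace the originals, since they may be reallocated or mutated in place:

```go
spansA, bucketsA := []Span{{Offset: -2, Length: 2}}, []float64{1, 2}
spansB, bucketsB := []Span{{Offset: 3, Length: 1}}, []float64{5}

// Exponential schema 0, no threshold, addition (not subtraction).
spansA, bucketsA = addBuckets(0, 0, false, spansA, bucketsA, spansB, bucketsB)

// spansA is now {{-2, 2}, {3, 1}} and bucketsA is {1, 2, 5}: the incoming
// bucket at absolute index 3 falls in the gap behind the existing span, so
// a new one-bucket span is appended.
```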
+ deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 } - targetSpans = append(targetSpans, span) - targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) - lastTargetBucketIdx = targetBucketIdx } - bucketIdx++ - origBucketIdx++ + nextLoop: + indexA = indexB + indexB++ + bIdxB++ } } - return targetSpans, targetBuckets + return spansA, bucketsA +} + +func FloatBucketsMatch(b1, b2 []float64) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if math.Float64bits(b) != math.Float64bits(b2[i]) { + return false + } + } + return true +} + +// ReduceResolution reduces the float histogram's spans, buckets into target schema. +// The target schema must be smaller than the current float histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. +func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + + h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, true) + h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, true) + + h.Schema = targetSchema + return h } diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index dd3e30427e..cf370a313e 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -14,8 +14,9 @@ package histogram import ( - "fmt" "math" + "math/rand" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -130,6 +131,94 @@ func TestFloatHistogramMul(t *testing.T) { NegativeBuckets: []float64{9, 3, 15, 18}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -11, + Count: -30, + Sum: -23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-1, 0, -3, -4, -7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3, -1, -5, -6}, + }, + }, + { + "negative multiplier", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -22, + Count: -60, + Sum: -46, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-2, 0, -6, -8, -14}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-6, -2, -10, -12}, + }, + }, + { + "no-op with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + 1, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + 
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + }, + { + "triple with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 3, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 90, + Sum: 69, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{3, 0, 9, 12, 21}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, } for _, c := range cases { @@ -141,6 +230,148 @@ func TestFloatHistogramMul(t *testing.T) { } } +func TestFloatHistogramCopy(t *testing.T) { + cases := []struct { + name string + orig *FloatHistogram + expected *FloatHistogram + }{ + { + name: "without buckets", + orig: &FloatHistogram{}, + expected: &FloatHistogram{}, + }, + { + name: "with buckets", + orig: &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []float64{5, 3, 1.234e5, 1000}, + }, + expected: &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []float64{5, 3, 1.234e5, 1000}, + }, + }, + { + name: "with empty buckets and non empty capacity", + orig: &FloatHistogram{ + PositiveSpans: make([]Span, 0, 1), + PositiveBuckets: make([]float64, 0, 1), + NegativeSpans: make([]Span, 0, 1), + NegativeBuckets: make([]float64, 0, 1), + }, + expected: &FloatHistogram{}, + }, + { + name: "with custom buckets", + orig: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + expected: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + }, + } + + for _, tcase := range cases { + t.Run(tcase.name, func(t *testing.T) { + hCopy := tcase.orig.Copy() + + // Modify a primitive value in the original histogram. 
+ tcase.orig.Sum++ + require.Equal(t, tcase.expected, hCopy) + assertDeepCopyFHSpans(t, tcase.orig, hCopy, tcase.expected) + }) + } +} + +func TestFloatHistogramCopyTo(t *testing.T) { + cases := []struct { + name string + orig *FloatHistogram + expected *FloatHistogram + }{ + { + name: "without buckets", + orig: &FloatHistogram{}, + expected: &FloatHistogram{}, + }, + { + name: "with buckets", + orig: &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []float64{5, 3, 1.234e5, 1000}, + }, + expected: &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []float64{5, 3, 1.234e5, 1000}, + }, + }, + { + name: "with empty buckets and non empty capacity", + orig: &FloatHistogram{ + PositiveSpans: make([]Span, 0, 1), + PositiveBuckets: make([]float64, 0, 1), + NegativeSpans: make([]Span, 0, 1), + NegativeBuckets: make([]float64, 0, 1), + }, + expected: &FloatHistogram{}, + }, + { + name: "with custom buckets", + orig: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + expected: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + }, + } + + for _, tcase := range cases { + t.Run(tcase.name, func(t *testing.T) { + hCopy := &FloatHistogram{} + tcase.orig.CopyTo(hCopy) + + // Modify a primitive value in the original histogram. + tcase.orig.Sum++ + require.Equal(t, tcase.expected, hCopy) + assertDeepCopyFHSpans(t, tcase.orig, hCopy, tcase.expected) + }) + } +} + +func assertDeepCopyFHSpans(t *testing.T, orig, hCopy, expected *FloatHistogram) { + // Do an in-place expansion of an original spans slice. + orig.PositiveSpans = expandSpans(orig.PositiveSpans) + orig.PositiveSpans[len(orig.PositiveSpans)-1] = Span{1, 2} + + hCopy.PositiveSpans = expandSpans(hCopy.PositiveSpans) + expected.PositiveSpans = expandSpans(expected.PositiveSpans) + // Expand the copy spans and assert that modifying the original has not affected the copy. 
+ require.Equal(t, expected, hCopy) +} + func TestFloatHistogramDiv(t *testing.T) { cases := []struct { name string @@ -226,6 +457,94 @@ func TestFloatHistogramDiv(t *testing.T) { NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -3493.3, + Sum: -2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{-1, -3.3, -4.2, -0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3.1, -3, -1.234e5, -1000}, + }, + }, + { + "negative half", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -15, + Sum: -11.5, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-0.5, 0, -1.5, -2, -3.5}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-1.5, -0.5, -2.5, -3}, + }, + }, + { + "no-op with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + 1, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + }, + { + "half with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 2, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 11.5, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, } for _, c := range cases { @@ -824,6 +1143,138 @@ func TestFloatHistogramDetectReset(t *testing.T) { }, true, }, + { + "no buckets to some buckets with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + CustomValues: []float64{1, 2, 3}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + false, + }, + { + "some buckets to no buckets with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + CustomValues: []float64{1, 2, 3}, + }, + true, + }, + { + "one bucket appears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: 
CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one bucket disappears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + true, + }, + { + "an unpopulated bucket disappears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one positive bucket goes up with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.3, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one positive bucket goes down with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.1, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + true, + }, } for _, c := range cases { @@ -938,6 +1389,21 @@ func TestFloatHistogramCompact(t *testing.T) { NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, }, }, + { + "cut empty buckets in the middle", + &FloatHistogram{ + PositiveSpans: []Span{{5, 4}}, + PositiveBuckets: []float64{1, 3, 0, 2}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{ + {Offset: 5, Length: 2}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{1, 3, 2}, + }, + }, { "cut empty buckets at start or end of spans, even in the middle", &FloatHistogram{ @@ -955,7 +1421,7 @@ func TestFloatHistogramCompact(t *testing.T) { }, }, { - "cut empty buckets at start or end but merge spans due to maxEmptyBuckets", + "cut empty buckets at start and end - also merge spans due to maxEmptyBuckets", &FloatHistogram{ PositiveSpans: []Span{{-4, 4}, {5, 3}}, PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3}, @@ -998,18 +1464,42 @@ func TestFloatHistogramCompact(t *testing.T) { PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, }, }, + { + "cut empty buckets from the middle of a span, avoiding none due to maxEmptyBuckets", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 4}}, + PositiveBuckets: []float64{1, 0, 0, 3.3}, + }, + 1, + &FloatHistogram{ + 
PositiveSpans: []Span{{-2, 1}, {2, 1}}, + PositiveBuckets: []float64{1, 3.3}, + }, + }, + { + "cut empty buckets and merge spans due to maxEmptyBuckets", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 4}, {3, 1}}, + PositiveBuckets: []float64{1, 0, 0, 3.3, 4.2}, + }, + 1, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 1}}, + PositiveBuckets: []float64{1, 3.3, 4.2}, + }, + }, { "cut empty buckets from the middle of a span, avoiding some due to maxEmptyBuckets", &FloatHistogram{ - PositiveSpans: []Span{{-4, 6}, {3, 3}}, - PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3}, + PositiveSpans: []Span{{-4, 6}, {3, 3}, {10, 2}}, + PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3, 2, 3}, NegativeSpans: []Span{{0, 2}, {3, 5}}, NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4}, }, 1, &FloatHistogram{ - PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}}, - PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}, {10, 2}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3, 2, 3}, NegativeSpans: []Span{{0, 2}, {3, 5}}, NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4}, }, @@ -1078,6 +1568,70 @@ func TestFloatHistogramCompact(t *testing.T) { NegativeBuckets: []float64{2, 3}, }, }, + { + "nothing should happen with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + }, + { + "eliminate zero offsets with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 5}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, + { + "eliminate multiple zero length spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 2}, {9, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, + { + "cut empty buckets at start and end with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 4}, {5, 6}}, + PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3, 0, 0, 0}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{2, 2}, {5, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + }, } for _, c := range cases { @@ -1093,6 +1647,7 @@ func TestFloatHistogramAdd(t *testing.T) { cases := []struct { name string in1, in2, expected *FloatHistogram + expErrMsg string }{ { "same bucket layout", @@ -1126,6 +1681,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 2}, {3, 2}}, NegativeBuckets: []float64{4, 2, 9, 10}, }, + "", }, { "same bucket layout, defined differently", @@ -1159,6 +1715,7 @@ func 
TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 5}, {0, 2}}, NegativeBuckets: []float64{4, 2, 0, 0, 0, 9, 10}, }, + "", }, { "non-overlapping spans", @@ -1192,9 +1749,10 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, + "", }, { - "non-overlapping inverted order", + "non-overlapping spans inverted order", &FloatHistogram{ ZeroThreshold: 0.01, ZeroCount: 8, @@ -1202,7 +1760,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{0, 2}, {3, 3}}, PositiveBuckets: []float64{5, 4, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}}, + NegativeSpans: []Span{{-6, 2}, {1, 2}}, NegativeBuckets: []float64{1, 1, 4, 4}, }, &FloatHistogram{ @@ -1222,9 +1780,10 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{-2, 2}, {0, 5}, {0, 3}}, PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, + NegativeSpans: []Span{{-6, 2}, {1, 2}, {4, 2}, {3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, + "", }, { "overlapping spans", @@ -1258,6 +1817,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "overlapping spans inverted order", @@ -1291,6 +1851,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change", @@ -1326,6 +1887,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero bucket in first histogram", @@ -1359,6 +1921,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero bucket in second histogram", @@ -1392,6 +1955,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero threshold in first histogram ends up inside a populated bucket of second histogram", @@ -1425,6 +1989,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero threshold in second histogram ends up inside a populated bucket of first histogram", @@ -1458,6 +2023,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change combined with larger zero bucket in second histogram", @@ -1493,6 +2059,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change combined with larger zero bucket in first histogram", @@ -1528,24 +2095,270 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", + }, + { + "same custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: 
[]float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 26, + Sum: 3.579, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "same custom bucket layout, defined differently", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 2}, {0, 1}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 26, + Sum: 3.579, + PositiveSpans: []Span{{0, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "non-overlapping spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 20, + Sum: 1.234, + PositiveSpans: []Span{{2, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 35, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "non-overlapping spans inverted order with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 20, + Sum: 1.234, + PositiveSpans: []Span{{2, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 35, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "overlapping spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 27, + Sum: 1.234, + PositiveSpans: []Span{{1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 42, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "overlapping spans inverted order with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 27, + Sum: 1.234, + PositiveSpans: []Span{{1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 
&FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 42, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "different custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4, 5}, + }, + nil, + "cannot apply this operation on custom buckets histograms with different custom bounds", + }, + { + "mix exponential and custom buckets histograms", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 59, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 7, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{4, 10, 1, 4, 14, 7}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + nil, + "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - require.Equal(t, c.expected, c.in1.Add(c.in2)) - // Has it also happened in-place? - require.Equal(t, c.expected, c.in1) + testHistogramAdd(t, c.in1, c.in2, c.expected, c.expErrMsg) + testHistogramAdd(t, c.in2, c.in1, c.expected, c.expErrMsg) }) } } +func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram, expErrMsg string) { + var ( + aCopy = a.Copy() + bCopy = b.Copy() + expectedCopy *FloatHistogram + ) + + if expected != nil { + expectedCopy = expected.Copy() + } + + res, err := aCopy.Add(bCopy) + if expErrMsg != "" { + require.EqualError(t, err, expErrMsg) + } else { + require.NoError(t, err) + } + + if expected != nil { + res.Compact(0) + expectedCopy.Compact(0) + + require.Equal(t, expectedCopy, res) + + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) + } +} + func TestFloatHistogramSub(t *testing.T) { // This has fewer test cases than TestFloatHistogramAdd because Add and // Sub share most of the trickier code. 
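Editor note: the `testHistogramAdd` helper above checks each case in both argument orders, verifies the in-place semantics of `Add`, and asserts that the second operand is never mutated. A compressed sketch of the same commutativity property, valid at least for the layouts exercised here (`a` and `b` are hypothetical histograms):

```go
func addCommutes(t *testing.T, a, b *FloatHistogram) {
	ab, err := a.Copy().Add(b.Copy())
	require.NoError(t, err)
	ba, err := b.Copy().Add(a.Copy())
	require.NoError(t, err)
	// The two results can differ in span layout, so Compact(0) normalizes
	// both before comparing.
	require.Equal(t, ab.Compact(0), ba.Compact(0))
}
```

The Sub cases that follow rely on the analogous `testFloatHistogramSub` helper.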
cases := []struct { name string in1, in2, expected *FloatHistogram + expErrMsg string }{ { "same bucket layout", @@ -1579,6 +2392,7 @@ func TestFloatHistogramSub(t *testing.T) { NegativeSpans: []Span{{3, 2}, {3, 2}}, NegativeBuckets: []float64{2, 0, 1, 2}, }, + "", }, { "schema change", @@ -1614,18 +2428,128 @@ func TestFloatHistogramSub(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{1, 9, 1, 4, 9, 1}, }, + "", + }, + { + "same custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 4, + Sum: 11, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 1, 1, 1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "different custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4, 5}, + }, + nil, + "cannot apply this operation on custom buckets histograms with different custom bounds", + }, + { + "mix exponential and custom buckets histograms", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 59, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 7, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{4, 10, 1, 4, 14, 7}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + nil, + "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - require.Equal(t, c.expected, c.in1.Sub(c.in2)) - // Has it also happened in-place? - require.Equal(t, c.expected, c.in1) + testFloatHistogramSub(t, c.in1, c.in2, c.expected, c.expErrMsg) + + var expectedNegative *FloatHistogram + if c.expected != nil { + expectedNegative = c.expected.Copy().Mul(-1) + } + testFloatHistogramSub(t, c.in2, c.in1, expectedNegative, c.expErrMsg) }) } } +func testFloatHistogramSub(t *testing.T, a, b, expected *FloatHistogram, expErrMsg string) { + var ( + aCopy = a.Copy() + bCopy = b.Copy() + expectedCopy *FloatHistogram + ) + + if expected != nil { + expectedCopy = expected.Copy() + } + + res, err := aCopy.Sub(bCopy) + if expErrMsg != "" { + require.EqualError(t, err, expErrMsg) + } else { + require.NoError(t, err) + } + + if expected != nil { + res.Compact(0) + expectedCopy.Compact(0) + + require.Equal(t, expectedCopy, res) + + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. 
+ require.Equal(t, b, bCopy) + } +} + func TestFloatHistogramCopyToSchema(t *testing.T) { cases := []struct { name string @@ -1684,11 +2608,34 @@ func TestFloatHistogramCopyToSchema(t *testing.T) { NegativeBuckets: []float64{3, 1, 5, 6}, }, }, + { + "no schema change for custom buckets", + CustomBucketsSchema, + &FloatHistogram{ + Count: 30, + Sum: 2.345, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7}, + }, + &FloatHistogram{ + Count: 30, + Sum: 2.345, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7}, + }, + }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { + inCopy := c.in.Copy() require.Equal(t, c.expected, c.in.CopyToSchema(c.targetSchema)) + // Check that the receiver histogram was not mutated: + require.Equal(t, inCopy, c.in) }) } } @@ -1733,8 +2680,8 @@ func TestReverseFloatBucketIterator(t *testing.T) { for it.Next() { actBuckets = append(actBuckets, it.At()) } - require.Greater(t, len(expBuckets), 0) - require.Greater(t, len(actBuckets), 0) + require.NotEmpty(t, expBuckets) + require.NotEmpty(t, actBuckets) require.Equal(t, expBuckets, actBuckets) // Negative buckets. @@ -1749,8 +2696,8 @@ func TestReverseFloatBucketIterator(t *testing.T) { for it.Next() { actBuckets = append(actBuckets, it.At()) } - require.Greater(t, len(expBuckets), 0) - require.Greater(t, len(actBuckets), 0) + require.NotEmpty(t, expBuckets) + require.NotEmpty(t, actBuckets) require.Equal(t, expBuckets, actBuckets) } @@ -1937,7 +2884,7 @@ func TestAllFloatBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { var expBuckets, actBuckets []Bucket[float64] if c.includeNeg { @@ -2163,7 +3110,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { var expBuckets, actBuckets []Bucket[float64] if c.includePos { @@ -2252,3 +3199,412 @@ func TestFloatBucketIteratorTargetSchema(t *testing.T) { } require.False(t, it.Next(), "negative iterator not exhausted") } + +func TestFloatCustomBucketsIterators(t *testing.T) { + cases := []struct { + h FloatHistogram + expPositiveBuckets []Bucket[float64] + }{ + { + h: FloatHistogram{ + Count: 622, + Sum: 10008.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{100, 344, 123, 55}, + CustomValues: []float64{10, 25, 50, 100, 500}, + }, + expPositiveBuckets: []Bucket[float64]{ + {Lower: math.Inf(-1), Upper: 10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, + {Lower: 10, Upper: 25, LowerInclusive: false, UpperInclusive: true, Count: 344, Index: 1}, + {Lower: 50, Upper: 100, LowerInclusive: false, UpperInclusive: true, Count: 123, Index: 3}, + {Lower: 500, Upper: math.Inf(1), LowerInclusive: false, UpperInclusive: true, Count: 55, Index: 5}, + }, + }, + { + h: FloatHistogram{ + Count: 622, + Sum: 10008.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{100, 344, 123, 55}, + CustomValues: []float64{-10, -5, 0, 10, 25}, + }, + expPositiveBuckets: 
[]Bucket[float64]{ + {Lower: math.Inf(-1), Upper: -10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, + {Lower: -10, Upper: -5, LowerInclusive: false, UpperInclusive: true, Count: 344, Index: 1}, + {Lower: 0, Upper: 10, LowerInclusive: false, UpperInclusive: true, Count: 123, Index: 3}, + {Lower: 25, Upper: math.Inf(1), LowerInclusive: false, UpperInclusive: true, Count: 55, Index: 5}, + }, + }, + } + + for i, c := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + { + it := c.h.AllBucketIterator() + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "all bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "all bucket iterator not exhausted") + + it = c.h.AllReverseBucketIterator() + length := len(c.expPositiveBuckets) + for j := 0; j < length; j++ { + i := length - j - 1 + b := c.expPositiveBuckets[i] + require.True(t, it.Next(), "all reverse bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "all reverse bucket iterator not exhausted") + + it = c.h.PositiveBucketIterator() + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "positive bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive bucket iterator not exhausted") + + it = c.h.PositiveReverseBucketIterator() + for j := 0; j < length; j++ { + i := length - j - 1 + b := c.expPositiveBuckets[i] + require.True(t, it.Next(), "positive reverse bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive reverse bucket iterator not exhausted") + + it = c.h.NegativeBucketIterator() + require.False(t, it.Next(), "negative bucket iterator not exhausted") + + it = c.h.NegativeReverseBucketIterator() + require.False(t, it.Next(), "negative reverse bucket iterator not exhausted") + } + { + it := c.h.floatBucketIterator(true, 0, CustomBucketsSchema) + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "positive iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive iterator not exhausted") + + it = c.h.floatBucketIterator(false, 0, CustomBucketsSchema) + require.False(t, it.Next(), "negative iterator not exhausted") + } + }) + } +} + +// TestFloatHistogramEquals tests FloatHistogram with float-specific cases that +// cannot be covered by TestHistogramEquals. +func TestFloatHistogramEquals(t *testing.T) { + h1 := FloatHistogram{ + Schema: 3, + Count: 2.2, + Sum: 9.7, + ZeroThreshold: 0.1, + ZeroCount: 1.1, + PositiveBuckets: []float64{3}, + NegativeBuckets: []float64{4}, + } + + equals := func(h1, h2 FloatHistogram) { + require.True(t, h1.Equals(&h2)) + require.True(t, h2.Equals(&h1)) + } + notEquals := func(h1, h2 FloatHistogram) { + require.False(t, h1.Equals(&h2)) + require.False(t, h2.Equals(&h1)) + } + + h2 := h1.Copy() + equals(h1, *h2) + + // Count is NaN (but not a StaleNaN). + hCountNaN := h1.Copy() + hCountNaN.Count = math.NaN() + notEquals(h1, *hCountNaN) + equals(*hCountNaN, *hCountNaN) + + // ZeroCount is NaN (but not a StaleNaN). + hZeroCountNaN := h1.Copy() + hZeroCountNaN.ZeroCount = math.NaN() + notEquals(h1, *hZeroCountNaN) + equals(*hZeroCountNaN, *hZeroCountNaN) + + // Positive bucket value is NaN. 
+ hPosBucketNaN := h1.Copy() + hPosBucketNaN.PositiveBuckets[0] = math.NaN() + notEquals(h1, *hPosBucketNaN) + equals(*hPosBucketNaN, *hPosBucketNaN) + + // Negative bucket value is NaN. + hNegBucketNaN := h1.Copy() + hNegBucketNaN.NegativeBuckets[0] = math.NaN() + notEquals(h1, *hNegBucketNaN) + equals(*hNegBucketNaN, *hNegBucketNaN) + + // Custom bounds are defined for exponential schema. + hCustom := h1.Copy() + hCustom.CustomValues = []float64{1, 2, 3} + equals(h1, *hCustom) + + cbh1 := FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 2.2, + Sum: 9.7, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{3}, + CustomValues: []float64{1, 2, 3}, + } + + require.NoError(t, cbh1.Validate()) + + cbh2 := cbh1.Copy() + equals(cbh1, *cbh2) + + // Has different custom bounds for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.CustomValues = []float64{1, 2, 3, 4} + notEquals(cbh1, *cbh2) + + // Has non-empty negative spans and buckets for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}} + cbh2.NegativeBuckets = []float64{1} + notEquals(cbh1, *cbh2) + + // Has non-zero zero count and threshold for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.ZeroThreshold = 0.1 + cbh2.ZeroCount = 10 + notEquals(cbh1, *cbh2) +} + +func TestFloatHistogramSize(t *testing.T) { + cases := []struct { + name string + fh *FloatHistogram + expected int + }{ + { + "without spans and buckets", + &FloatHistogram{ // 8 bytes. + CounterResetHint: 0, // 1 byte. + Schema: 1, // 4 bytes. + ZeroThreshold: 0.01, // 8 bytes. + ZeroCount: 5.5, // 8 bytes. + Count: 3493.3, // 8 bytes. + Sum: 2349209.324, // 8 bytes. + PositiveSpans: nil, // 24 bytes. + PositiveBuckets: nil, // 24 bytes. + NegativeSpans: nil, // 24 bytes. + NegativeBuckets: nil, // 24 bytes. + CustomValues: nil, // 24 bytes. + }, + 8 + 4 + 4 + 8 + 8 + 8 + 8 + 24 + 24 + 24 + 24 + 24, + }, + { + "complete struct", + &FloatHistogram{ // 8 bytes. + CounterResetHint: 0, // 1 byte. + Schema: 1, // 4 bytes. + ZeroThreshold: 0.01, // 8 bytes. + ZeroCount: 5.5, // 8 bytes. + Count: 3493.3, // 8 bytes. + Sum: 2349209.324, // 8 bytes. + PositiveSpans: []Span{ // 24 bytes. + {-2, 1}, // 2 * 4 bytes. + {2, 3}, // 2 * 4 bytes. + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, // 24 bytes + 4 * 8 bytes. + NegativeSpans: []Span{ // 24 bytes. + {3, 2}, // 2 * 4 bytes. + {3, 2}}, // 2 * 4 bytes. + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, // 24 bytes + 4 * 8 bytes. + CustomValues: nil, // 24 bytes. + }, + 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 2*4 + 2*4) + (24 + 4*8) + (24 + 4*8) + 24, + }, + { + "complete struct with custom buckets", + &FloatHistogram{ // 8 bytes. + CounterResetHint: 0, // 1 byte. + Schema: CustomBucketsSchema, // 4 bytes. + ZeroThreshold: 0, // 8 bytes. + ZeroCount: 0, // 8 bytes. + Count: 3493.3, // 8 bytes. + Sum: 2349209.324, // 8 bytes. + PositiveSpans: []Span{ // 24 bytes. + {0, 1}, // 2 * 4 bytes. + {2, 3}, // 2 * 4 bytes. + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, // 24 bytes + 4 * 8 bytes. + NegativeSpans: nil, // 24 bytes. + NegativeBuckets: nil, // 24 bytes. + CustomValues: []float64{1, 2, 3}, // 24 bytes + 3 * 8 bytes. 
+ }, + 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 4*8) + 24 + 24 + (24 + 3*8), + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.fh.Size()) + }) + } +} + +func TestFloatHistogramString(t *testing.T) { + cases := []struct { + name string + fh *FloatHistogram + expected string + }{ + { + "exponential histogram", + &FloatHistogram{ + Schema: 1, + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{ + {-2, 1}, + {2, 3}, + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{ + {3, 2}, + {3, 2}, + }, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + `{count:3493.3, sum:2.349209324e+06, [-22.62741699796952,-16):1000, [-16,-11.31370849898476):123400, [-4,-2.82842712474619):3, [-2.82842712474619,-2):3.1, [-0.01,0.01]:5.5, (0.35355339059327373,0.5]:1, (1,1.414213562373095]:3.3, (1.414213562373095,2]:4.2, (2,2.82842712474619]:0.1}`, + }, + { + "custom buckets histogram", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{ + {0, 1}, + {2, 4}, + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 5}, + CustomValues: []float64{1, 2, 5, 10, 15, 20}, + }, + `{count:3493.3, sum:2.349209324e+06, [-Inf,1]:1, (5,10]:3.3, (10,15]:4.2, (15,20]:0.1, (20,+Inf]:5}`, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.NoError(t, c.fh.Validate()) + require.Equal(t, c.expected, c.fh.String()) + }) + } +} + +func BenchmarkFloatHistogramAllBucketIterator(b *testing.B) { + rng := rand.New(rand.NewSource(0)) + + fh := createRandomFloatHistogram(rng, 50) + + b.ReportAllocs() // the current implementation reports 1 alloc + b.ResetTimer() + + for n := 0; n < b.N; n++ { + for it := fh.AllBucketIterator(); it.Next(); { + } + } +} + +func BenchmarkFloatHistogramDetectReset(b *testing.B) { + rng := rand.New(rand.NewSource(0)) + + fh := createRandomFloatHistogram(rng, 50) + + b.ReportAllocs() // the current implementation reports 0 allocs + b.ResetTimer() + + for n := 0; n < b.N; n++ { + // Detect against itself (no resets is the worst-case input).
+ fh.DetectReset(fh) + } +} + +func createRandomFloatHistogram(rng *rand.Rand, spanNum int32) *FloatHistogram { + f := &FloatHistogram{} + f.PositiveSpans, f.PositiveBuckets = createRandomSpans(rng, spanNum) + f.NegativeSpans, f.NegativeBuckets = createRandomSpans(rng, spanNum) + return f +} + +func createRandomSpans(rng *rand.Rand, spanNum int32) ([]Span, []float64) { + Spans := make([]Span, spanNum) + Buckets := make([]float64, 0) + for i := 0; i < int(spanNum); i++ { + Spans[i].Offset = rng.Int31n(spanNum) + 1 + Spans[i].Length = uint32(rng.Int31n(spanNum) + 1) + for j := 0; j < int(Spans[i].Length); j++ { + Buckets = append(Buckets, float64(rng.Int31n(spanNum)+1)) + } + } + return Spans, Buckets +} + +func TestFloatHistogramReduceResolution(t *testing.T) { + tcs := map[string]struct { + origin *FloatHistogram + target *FloatHistogram + }{ + "valid float histogram": { + origin: &FloatHistogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + PositiveBuckets: []float64{1, 3, 1, 2, 1, 1}, + NegativeSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + NegativeBuckets: []float64{1, 3, 1, 2, 1, 1}, + }, + target: &FloatHistogram{ + Schema: -1, + PositiveSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{1, 4, 2, 2}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []float64{1, 4, 2, 2}, + }, + }, + } + + for _, tc := range tcs { + target := tc.origin.ReduceResolution(tc.target.Schema) + require.Equal(t, tc.target, target) + // Check that receiver histogram was mutated: + require.Equal(t, tc.target, tc.origin) + } +} diff --git a/model/histogram/generic.go b/model/histogram/generic.go index dad54cb069..a36b58d069 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -14,11 +14,39 @@ package histogram import ( + "errors" "fmt" "math" "strings" ) +const ( + ExponentialSchemaMax int32 = 8 + ExponentialSchemaMin int32 = -4 + CustomBucketsSchema int32 = -53 +) + +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few") + ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order") + ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite") + ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas") + ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") +) + +func IsCustomBucketsSchema(s int32) bool { + return s == CustomBucketsSchema +} + +func IsExponentialSchema(s int32) bool { + return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax +} + // BucketCount is a 
type constraint for the count in a bucket, which can be // float64 (for type FloatHistogram) or uint64 (for type Histogram). type BucketCount interface { @@ -31,7 +59,7 @@ type BucketCount interface { // absolute counts directly). Go type parameters don't allow type // specialization. Therefore, where special treatment of deltas between buckets // vs. absolute counts is important, this information has to be provided as a -// separate boolean parameter "deltaBuckets" +// separate boolean parameter "deltaBuckets". type InternalBucketCount interface { float64 | int64 } @@ -53,6 +81,13 @@ type Bucket[BC BucketCount] struct { Index int32 } +// strippedBucket is Bucket without bound values (which are expensive to calculate +// and not used in certain use cases). +type strippedBucket[BC BucketCount] struct { + count BC + index int32 +} + // String returns a string representation of a Bucket, using the usual // mathematical notation of '['/']' for inclusive bounds and '('/')' for // non-inclusive bounds. @@ -99,31 +134,45 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { currCount IBC // Count in the current bucket. currIdx int32 // The actual bucket index. + + customValues []float64 // Bounds (usually upper) for histograms with custom buckets. } -func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { +func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { return b.at(b.schema) } -// at is an internal version of the exported At to enable using a different -// schema. -func (b baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { +// at is an internal version of the exported At to enable using a different schema. +func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { bucket := Bucket[BC]{ Count: BC(b.currCount), Index: b.currIdx, } if b.positive { - bucket.Upper = getBound(b.currIdx, schema) - bucket.Lower = getBound(b.currIdx-1, schema) + bucket.Upper = getBound(b.currIdx, schema, b.customValues) + bucket.Lower = getBound(b.currIdx-1, schema, b.customValues) + } else { + bucket.Lower = -getBound(b.currIdx, schema, b.customValues) + bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues) + } + if IsCustomBucketsSchema(schema) { + bucket.LowerInclusive = b.currIdx == 0 + bucket.UpperInclusive = true } else { - bucket.Lower = -getBound(b.currIdx, schema) - bucket.Upper = -getBound(b.currIdx-1, schema) + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 } - bucket.LowerInclusive = bucket.Lower < 0 - bucket.UpperInclusive = bucket.Upper > 0 return bucket } +// strippedAt returns current strippedBucket (which lacks bucket bounds but is cheaper to compute). +func (b *baseBucketIterator[BC, IBC]) strippedAt() strippedBucket[BC] { + return strippedBucket[BC]{ + count: BC(b.currCount), + index: b.currIdx, + } +} + // compactBuckets is a generic function used by both Histogram.Compact and // FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are // deltas. Set it to false if the buckets contain absolute counts. 
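(Editorial aside, not part of the patch: the bound and inclusivity rules above are easy to misread, so here is a minimal runnable Go sketch of how a bucket index resolves to a bound under the custom-buckets schema, mirroring the getBound logic added in the next hunk. The helper name resolveBound is invented for illustration; the real getBound additionally panics on out-of-range indexes.)

package main

import (
	"fmt"
	"math"
)

// resolveBound mimics the custom-buckets branch of getBound: index -1 is the
// open lower end (-Inf), an index equal to len(customValues) is the upper end
// of the implicit overflow bucket (+Inf), anything else reads the bounds slice.
func resolveBound(idx int, customValues []float64) float64 {
	switch {
	case idx == -1:
		return math.Inf(-1)
	case idx == len(customValues):
		return math.Inf(1)
	default:
		return customValues[idx]
	}
}

func main() {
	bounds := []float64{1, 2, 5, 10}
	// Four bounds define five buckets, the last one reaching to +Inf.
	for idx := 0; idx <= len(bounds); idx++ {
		fmt.Printf("bucket %d: (%g, %g]\n", idx, resolveBound(idx-1, bounds), resolveBound(idx, bounds))
	}
	// Per the iterator rules above, bucket 0 is the only lower-inclusive one,
	// so the real String() renders it as [-Inf,1].
}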
@@ -333,19 +382,92 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp return buckets, spans } -func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool { - if len(b1) != len(b2) { - return false +func checkHistogramSpans(spans []Span, numBuckets int) error { + var spanBuckets int + for n, span := range spans { + if n > 0 && span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + return nil +} + +func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { + if len(buckets) == 0 { + return nil + } + + var last IBC + for i := 0; i < len(buckets); i++ { + var c IBC + if deltas { + c = last + buckets[i] + } else { + c = buckets[i] + } + if c < 0 { + return fmt.Errorf("bucket number %d has observation count of %v: %w", i+1, c, ErrHistogramNegativeBucketCount) + } + last = c + *count += BC(c) + } + + return nil +} + +func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error { + prev := math.Inf(-1) + for _, curr := range bounds { + if curr <= prev { + return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid) + } + prev = curr + } + if prev == math.Inf(1) { + return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite) + } + + var spanBuckets int + var totalSpanLength int + for n, span := range spans { + if span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + totalSpanLength += int(span.Length) + int(span.Offset) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) } - for i, b := range b1 { - if b != b2[i] { - return false + if (len(bounds) + 1) < totalSpanLength { + return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch) + } + + return nil +} + +func getBound(idx, schema int32, customValues []float64) float64 { + if IsCustomBucketsSchema(schema) { + length := int32(len(customValues)) + switch { + case idx > length || idx < -1: + panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length)) + case idx == length: + return math.Inf(1) + case idx == -1: + return math.Inf(-1) + default: + return customValues[idx] } } - return true + return getBoundExponential(idx, schema) } -func getBound(idx, schema int32) float64 { +func getBoundExponential(idx, schema int32) float64 { // Here a bit of context about the behavior for the last bucket counting // regular numbers (called simply "last bucket" below) and the bucket // counting observations of ±Inf (called "inf bucket" below, with an idx @@ -374,7 +496,7 @@ func getBound(idx, schema int32) float64 { // bucket results in precisely that. It is either frac=1.0 & exp=1024 // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). 
(This is, // by the way, a power of two where the exponent itself is a power of - // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // two, 2¹⁰ in fact, which coincides with a bucket boundary in all // schemas.) So these are the special cases we have to catch below. if schema < 0 { exp := int(idx) << -schema @@ -552,3 +674,113 @@ var exponentialBounds = [][]float64{ 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, }, } + +// reduceResolution reduces the input spans, buckets in origin schema to the spans, buckets in target schema. +// The target schema must be smaller than the original schema. +// Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +// Set inplace to true to reuse input slices and avoid allocations (otherwise +// new slices will be allocated for result). +func reduceResolution[IBC InternalBucketCount]( + originSpans []Span, + originBuckets []IBC, + originSchema, + targetSchema int32, + deltaBuckets bool, + inplace bool, +) ([]Span, []IBC) { + var ( + targetSpans []Span // The spans in the target schema. + targetBuckets []IBC // The bucket counts in the target schema. + bucketIdx int32 // The index of bucket in the origin schema. + bucketCountIdx int // The position of a bucket in origin bucket count slice `originBuckets`. + targetBucketIdx int32 // The index of bucket in the target schema. + lastBucketCount IBC // The last visited bucket's count in the origin schema. + lastTargetBucketIdx int32 // The index of the last added target bucket. + lastTargetBucketCount IBC + ) + + if inplace { + // Slice reuse is safe because when reducing the resolution, + // target slices don't grow faster than origin slices are being read. + targetSpans = originSpans[:0] + targetBuckets = originBuckets[:0] + } + + for _, span := range originSpans { + // Determine the index of the first bucket in this span. + bucketIdx += span.Offset + for j := 0; j < int(span.Length); j++ { + // Determine the index of the bucket in the target schema from the index in the original schema. + targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema) + + switch { + case len(targetSpans) == 0: + // This is the first span in the targetSpans. + span := Span{ + Offset: targetBucketIdx, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + lastTargetBucketIdx = targetBucketIdx + lastBucketCount = originBuckets[bucketCountIdx] + lastTargetBucketCount = originBuckets[bucketCountIdx] + + case lastTargetBucketIdx == targetBucketIdx: + // The current bucket has to be merged into the same target bucket as the previous bucket. + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets[len(targetBuckets)-1] += lastBucketCount + lastTargetBucketCount += lastBucketCount + } else { + targetBuckets[len(targetBuckets)-1] += originBuckets[bucketCountIdx] + } + + case (lastTargetBucketIdx + 1) == targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is next to the previous target bucket, + // so we add it to the current target span. 
+ targetSpans[len(targetSpans)-1].Length++ + lastTargetBucketIdx++ + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + + case (lastTargetBucketIdx + 1) < targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is separated by a gap from the previous target bucket, + // so we need to add a new target span. + span := Span{ + Offset: targetBucketIdx - lastTargetBucketIdx - 1, + Length: 1, + } + targetSpans = append(targetSpans, span) + lastTargetBucketIdx = targetBucketIdx + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + } + + bucketIdx++ + bucketCountIdx++ + } + } + + return targetSpans, targetBuckets +} + +func clearIfNotNil[T any](items []T) []T { + if items == nil { + return nil + } + return items[:0] +} diff --git a/model/histogram/generic_test.go b/model/histogram/generic_test.go index 55015c047f..b49adbcadf 100644 --- a/model/histogram/generic_test.go +++ b/model/histogram/generic_test.go @@ -15,12 +15,13 @@ package histogram import ( "math" + "slices" "testing" "github.com/stretchr/testify/require" ) -func TestGetBound(t *testing.T) { +func TestGetBoundExponential(t *testing.T) { scenarios := []struct { idx int32 schema int32 @@ -104,9 +105,105 @@ func TestGetBound(t *testing.T) { } for _, s := range scenarios { - got := getBound(s.idx, s.schema) + got := getBoundExponential(s.idx, s.schema) if s.want != got { require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema) } } } + +func TestReduceResolutionHistogram(t *testing.T) { + cases := []struct { + spans []Span + buckets []int64 + schema int32 + targetSchema int32 + expectedSpans []Span + expectedBuckets []int64 + }{ + { + spans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + buckets: []int64{1, 2, -2, 1, -1, 0}, + schema: 0, + targetSchema: -1, + expectedSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + expectedBuckets: []int64{1, 3, -2, 0}, + // schema 0, base 2 { (0.5, 1]:1 (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:0, (16,32]:0, (32,64]:0, (64,128]:1, (128,256]:1}", + // schema 1, base 4 { (0.25, 1):1 (1,4]:4, (4,16]:2, (16,64]:0, (64,256]:2} + }, + } + + for _, tc := range cases { + spansCopy, bucketsCopy := slices.Clone(tc.spans), slices.Clone(tc.buckets) + spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, true, false) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were not mutated: + require.Equal(t, spansCopy, tc.spans) + require.Equal(t, bucketsCopy, tc.buckets) + + // Output slices reuse input slices: + const inplace = true + spans, buckets = reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, true, inplace) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were mutated which is now expected: + require.Equal(t, spans, tc.spans[:len(spans)]) + require.Equal(t, buckets, tc.buckets[:len(buckets)]) + } +} + +func TestReduceResolutionFloatHistogram(t *testing.T) { + cases := []struct { + spans 
[]Span + buckets []float64 + schema int32 + targetSchema int32 + expectedSpans []Span + expectedBuckets []float64 + }{ + { + spans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + buckets: []float64{1, 3, 1, 2, 1, 1}, + schema: 0, + targetSchema: -1, + expectedSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + expectedBuckets: []float64{1, 4, 2, 2}, + // schema 0, base 2 { (0.5, 1]:1 (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:0, (16,32]:0, (32,64]:0, (64,128]:1, (128,256]:1}", + // schema 1, base 4 { (0.25, 1):1 (1,4]:4, (4,16]:2, (16,64]:0, (64,256]:2} + }, + } + + for _, tc := range cases { + spansCopy, bucketsCopy := slices.Clone(tc.spans), slices.Clone(tc.buckets) + spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, false, false) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were not mutated: + require.Equal(t, spansCopy, tc.spans) + require.Equal(t, bucketsCopy, tc.buckets) + + // Output slices reuse input slices: + const inplace = true + spans, buckets = reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, false, inplace) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were mutated which is now expected: + require.Equal(t, spans, tc.spans[:len(spans)]) + require.Equal(t, buckets, tc.buckets[:len(buckets)]) + } +} diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 6d425307c5..e4b99ec420 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -16,6 +16,7 @@ package histogram import ( "fmt" "math" + "slices" "strings" ) @@ -48,11 +49,12 @@ const ( type Histogram struct { // Counter reset information. CounterResetHint CounterResetHint - // Currently valid schema numbers are -4 <= n <= 8. They are all for - // base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. Or - // in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -68,6 +70,12 @@ type Histogram struct { // count. All following ones are deltas relative to the previous // element. PositiveBuckets, NegativeBuckets []int64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 } // A Span defines a continuous sequence of buckets. @@ -79,30 +87,87 @@ type Span struct { Length uint32 } +func (h *Histogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) +} + // Copy returns a deep copy of the Histogram.
func (h *Histogram) Copy() *Histogram { - c := *h + c := Histogram{ + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + Count: h.Count, + Sum: h.Sum, + } + + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } if len(h.PositiveSpans) != 0 { c.PositiveSpans = make([]Span, len(h.PositiveSpans)) copy(c.PositiveSpans, h.PositiveSpans) } - if len(h.NegativeSpans) != 0 { - c.NegativeSpans = make([]Span, len(h.NegativeSpans)) - copy(c.NegativeSpans, h.NegativeSpans) - } if len(h.PositiveBuckets) != 0 { c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) copy(c.PositiveBuckets, h.PositiveBuckets) } - if len(h.NegativeBuckets) != 0 { - c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) - copy(c.NegativeBuckets, h.NegativeBuckets) - } return &c } +// CopyTo makes a deep copy into the given Histogram object. +// The destination object has to be a non-nil pointer. +func (h *Histogram) CopyTo(to *Histogram) { + to.CounterResetHint = h.CounterResetHint + to.Schema = h.Schema + to.Count = h.Count + to.Sum = h.Sum + + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) + copy(to.PositiveSpans, h.PositiveSpans) + + to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) + copy(to.PositiveBuckets, h.PositiveBuckets) +} + // String returns a string representation of the Histogram. func (h *Histogram) String() string { var sb strings.Builder @@ -134,8 +199,11 @@ func (h *Histogram) String() string { return sb.String() } -// ZeroBucket returns the zero bucket. +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. func (h *Histogram) ZeroBucket() Bucket[uint64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } return Bucket[uint64]{ Lower: -h.ZeroThreshold, Upper: h.ZeroThreshold, @@ -148,13 +216,15 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] { // PositiveBucketIterator returns a BucketIterator to iterate over all positive // buckets in ascending order (starting next to the zero bucket and going up). 
func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { - return newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) + return &it } // NegativeBucketIterator returns a BucketIterator to iterate over all negative // buckets in descending order (starting next to the zero bucket and going down). func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { - return newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) + return &it } // CumulativeBucketIterator returns a BucketIterator to iterate over a @@ -172,27 +242,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { // Exact match is when there are no new buckets (even empty) and no missing buckets, // and all the bucket values match. Spans can have different empty length spans in between, // but they must represent the same bucket layout to match. +// Sum is compared based on its bit pattern because this method +// is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. func (h *Histogram) Equals(h2 *Histogram) bool { if h2 == nil { return false } - if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + if h.Schema != h2.Schema || h.Count != h2.Count || + math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } - if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + if h.UsesCustomBuckets() { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount { return false } + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { return false } + if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } - if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { return false } - if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) { return false } @@ -275,64 +360,136 @@ func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { return h } -// ToFloat returns a FloatHistogram representation of the Histogram. It is a -// deep copy (e.g. spans are not shared). -func (h *Histogram) ToFloat() *FloatHistogram { - var ( - positiveSpans, negativeSpans []Span - positiveBuckets, negativeBuckets []float64 - ) - if len(h.PositiveSpans) != 0 { - positiveSpans = make([]Span, len(h.PositiveSpans)) - copy(positiveSpans, h.PositiveSpans) +// ToFloat returns a FloatHistogram representation of the Histogram. It is a deep +// copy (e.g. spans are not shared). The function accepts a FloatHistogram as an +// argument whose memory will be reused and overwritten if provided. If this +// argument is nil, a new FloatHistogram will be allocated. 
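+// Since Histogram stores bucket counts as deltas, the conversion accumulates
+// them into the absolute counts that FloatHistogram uses.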
+func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { + if fh == nil { + fh = &FloatHistogram{} + } + fh.CounterResetHint = h.CounterResetHint + fh.Schema = h.Schema + fh.Count = float64(h.Count) + fh.Sum = h.Sum + + if h.UsesCustomBuckets() { + fh.ZeroThreshold = 0 + fh.ZeroCount = 0 + fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans) + fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets) + + fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues)) + copy(fh.CustomValues, h.CustomValues) + } else { + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative + } + fh.CustomValues = clearIfNotNil(fh.CustomValues) } - if len(h.NegativeSpans) != 0 { - negativeSpans = make([]Span, len(h.NegativeSpans)) - copy(negativeSpans, h.NegativeSpans) + + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) + copy(fh.PositiveSpans, h.PositiveSpans) + + fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) + var currentPositive float64 + for i, b := range h.PositiveBuckets { + currentPositive += float64(b) + fh.PositiveBuckets[i] = currentPositive } - if len(h.PositiveBuckets) != 0 { - positiveBuckets = make([]float64, len(h.PositiveBuckets)) - var current float64 - for i, b := range h.PositiveBuckets { - current += float64(b) - positiveBuckets[i] = current - } + + return fh +} + +func resize[T any](items []T, n int) []T { + if cap(items) < n { + return make([]T, n) } - if len(h.NegativeBuckets) != 0 { - negativeBuckets = make([]float64, len(h.NegativeBuckets)) - var current float64 - for i, b := range h.NegativeBuckets { - current += float64(b) - negativeBuckets[i] = current + return items[:n] +} + +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. +// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a +// strict h.Count = nCount + pCount + h.ZeroCount check is performed. +// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), +// because NaN observations do not increment the values of buckets (but they do increment +// the total h.Count). 
+func (h *Histogram) Validate() error { + var nCount, pCount uint64 + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomValues != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") } } + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + if err != nil { + return fmt.Errorf("positive side: %w", err) + } - return &FloatHistogram{ - CounterResetHint: h.CounterResetHint, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: float64(h.ZeroCount), - Count: float64(h.Count), - Sum: h.Sum, - PositiveSpans: positiveSpans, - NegativeSpans: negativeSpans, - PositiveBuckets: positiveBuckets, - NegativeBuckets: negativeBuckets, + sumOfBuckets := nCount + pCount + h.ZeroCount + if math.IsNaN(h.Sum) { + if sumOfBuckets > h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountNotBigEnough) + } + } else { + if sumOfBuckets != h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountMismatch) + } } + + return nil } type regularBucketIterator struct { baseBucketIterator[uint64, int64] } -func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) *regularBucketIterator { +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator { i := baseBucketIterator[uint64, int64]{ - schema: schema, - spans: spans, - buckets: buckets, - positive: positive, + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customValues: customValues, } - return ®ularBucketIterator{i} + return regularBucketIterator{i} } func (r *regularBucketIterator) Next() bool { @@ -404,7 +561,7 @@ func (c *cumulativeBucketIterator) Next() bool { if c.emptyBucketCount > 0 { // We are traversing through empty buckets at the moment. 
- c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues) c.currIdx++ c.emptyBucketCount-- return true @@ -421,7 +578,7 @@ func (c *cumulativeBucketIterator) Next() bool { c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] c.currCumulativeCount += uint64(c.currCount) - c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues) c.posBucketsIdx++ c.idxInSpan++ @@ -448,3 +605,28 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] { Index: c.currIdx - 1, } } + +// ReduceResolution reduces the histogram's spans, buckets into target schema. +// The target schema must be smaller than the current histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. +func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + + h.PositiveSpans, h.PositiveBuckets = reduceResolution( + h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, true, + ) + h.NegativeSpans, h.NegativeBuckets = reduceResolution( + h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, true, + ) + h.Schema = targetSchema + return h +} diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 3975353d2a..edc8663c94 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -14,11 +14,13 @@ package histogram import ( - "fmt" "math" + "strconv" "testing" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/value" ) func TestHistogramString(t *testing.T) { @@ -67,10 +69,25 @@ func TestHistogramString(t *testing.T) { }, expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}", }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + Count: 19, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomValues: []float64{1, 2, 5, 10, 15, 20, 25, 50}, + }, + expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}", + }, } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { actualString := c.histogram.String() require.Equal(t, c.expectedString, actualString) }) @@ -206,10 +223,30 @@ func TestCumulativeBucketIterator(t *testing.T) { {Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2}, }, }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{5, 10, 20, 50}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1}, + + {Lower: math.Inf(-1), Upper: 20, Count: 3, 
LowerInclusive: true, UpperInclusive: true, Index: 2}, + + {Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3}, + {Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4}, + }, + }, } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { it := c.histogram.CumulativeBucketIterator() actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets)) for it.Next() { @@ -366,10 +403,66 @@ func TestRegularBucketIterator(t *testing.T) { }, expectedNegativeBuckets: []Bucket[uint64]{}, }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{5, 10, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{0, 10, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 5}, + }, + PositiveBuckets: []int64{1, 1, 0, -1, 0}, + CustomValues: []float64{-5, 0, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + {Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2}, + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { it := c.histogram.PositiveBucketIterator() actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets)) for it.Next() { @@ -406,16 +499,134 @@ func TestHistogramToFloat(t *testing.T) { }, NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, } - fh := h.ToFloat() + cases := []struct { + name string + fh *FloatHistogram + }{ + {name: "without prior float histogram"}, + {name: "prior float histogram with more buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 
2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }}, + {name: "prior float histogram with fewer buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2}, + }}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + fh := h.ToFloat(c.fh) + require.Equal(t, h.String(), fh.String()) + }) + } +} + +func TestCustomBucketsHistogramToFloat(t *testing.T) { + h := Histogram{ + Schema: CustomBucketsSchema, + Count: 10, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomValues: []float64{5, 10, 20, 50, 100, 500}, + } + cases := []struct { + name string + fh *FloatHistogram + }{ + {name: "without prior float histogram"}, + {name: "prior float histogram with more buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }}, + {name: "prior float histogram with fewer buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2}, + }}, + } - require.Equal(t, h.String(), fh.String()) + require.NoError(t, h.Validate()) + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + hStr := h.String() + fh := h.ToFloat(c.fh) + require.NoError(t, fh.Validate()) + require.Equal(t, hStr, h.String()) + require.Equal(t, hStr, fh.String()) + }) + } } -// TestHistogramMatches tests both Histogram and FloatHistogram. -func TestHistogramMatches(t *testing.T) { +// TestHistogramEquals tests both Histogram and FloatHistogram. 
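+// Every case is asserted both on the integer histograms themselves and on
+// their ToFloat conversions.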
+func TestHistogramEquals(t *testing.T) { h1 := Histogram{ Schema: 3, - Count: 61, + Count: 62, Sum: 2.7, ZeroThreshold: 0.1, ZeroCount: 42, @@ -434,17 +645,26 @@ func TestHistogramMatches(t *testing.T) { equals := func(h1, h2 Histogram) { require.True(t, h1.Equals(&h2)) require.True(t, h2.Equals(&h1)) - h1f, h2f := h1.ToFloat(), h2.ToFloat() + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) require.True(t, h1f.Equals(h2f)) require.True(t, h2f.Equals(h1f)) } notEquals := func(h1, h2 Histogram) { require.False(t, h1.Equals(&h2)) require.False(t, h2.Equals(&h1)) - h1f, h2f := h1.ToFloat(), h2.ToFloat() + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) require.False(t, h1f.Equals(h2f)) require.False(t, h2f.Equals(h1f)) } + notEqualsUntilFloatConv := func(h1, h2 Histogram) { + require.False(t, h1.Equals(&h2)) + require.False(t, h2.Equals(&h1)) + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) + require.True(t, h1f.Equals(h2f)) + require.True(t, h2f.Equals(h1f)) + } + + require.NoError(t, h1.Validate()) h2 := h1.Copy() equals(h1, *h2) @@ -537,6 +757,212 @@ func TestHistogramMatches(t *testing.T) { }) h2.NegativeBuckets = append(h2.NegativeBuckets, 1) notEquals(h1, *h2) + + // Sum is StaleNaN. + hStale := h1.Copy() + hStale.Sum = math.Float64frombits(value.StaleNaN) + notEquals(h1, *hStale) + equals(*hStale, *hStale) + + // Sum is NaN (but not a StaleNaN). + hNaN := h1.Copy() + hNaN.Sum = math.NaN() + notEquals(h1, *hNaN) + equals(*hNaN, *hNaN) + + // Sum StaleNaN vs regular NaN. + notEquals(*hStale, *hNaN) + + // Has non-empty custom bounds for exponential schema. + hCustom := h1.Copy() + hCustom.CustomValues = []float64{1, 2, 3} + equals(h1, *hCustom) + + cbh1 := Histogram{ + Schema: CustomBucketsSchema, + Count: 10, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 10, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomValues: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000}, + } + + require.NoError(t, cbh1.Validate()) + + cbh2 := cbh1.Copy() + equals(cbh1, *cbh2) + + // Has different custom bounds for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.CustomValues = []float64{0.1, 0.2, 0.5} + notEquals(cbh1, *cbh2) + + // Has non-empty negative spans and buckets for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}} + cbh2.NegativeBuckets = []int64{1} + notEqualsUntilFloatConv(cbh1, *cbh2) + + // Has non-zero zero count and threshold for custom buckets schema. 
+ cbh2 = cbh1.Copy() + cbh2.ZeroThreshold = 0.1 + cbh2.ZeroCount = 10 + notEqualsUntilFloatConv(cbh1, *cbh2) +} + +func TestHistogramCopy(t *testing.T) { + cases := []struct { + name string + orig *Histogram + expected *Histogram + }{ + { + name: "without buckets", + orig: &Histogram{}, + expected: &Histogram{}, + }, + { + name: "with buckets", + orig: &Histogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000}, + }, + expected: &Histogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000}, + }, + }, + { + name: "with empty buckets and non empty capacity", + orig: &Histogram{ + PositiveSpans: make([]Span, 0, 1), + PositiveBuckets: make([]int64, 0, 1), + NegativeSpans: make([]Span, 0, 1), + NegativeBuckets: make([]int64, 0, 1), + }, + expected: &Histogram{}, + }, + { + name: "with custom buckets", + orig: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + expected: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + }, + } + + for _, tcase := range cases { + t.Run(tcase.name, func(t *testing.T) { + hCopy := tcase.orig.Copy() + + // Modify a primitive value in the original histogram. + tcase.orig.Sum++ + require.Equal(t, tcase.expected, hCopy) + assertDeepCopyHSpans(t, tcase.orig, hCopy, tcase.expected) + }) + } +} + +func TestHistogramCopyTo(t *testing.T) { + cases := []struct { + name string + orig *Histogram + expected *Histogram + }{ + { + name: "without buckets", + orig: &Histogram{}, + expected: &Histogram{}, + }, + { + name: "with buckets", + orig: &Histogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000}, + }, + expected: &Histogram{ + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000}, + }, + }, + { + name: "with empty buckets and non empty capacity", + orig: &Histogram{ + PositiveSpans: make([]Span, 0, 1), + PositiveBuckets: make([]int64, 0, 1), + NegativeSpans: make([]Span, 0, 1), + NegativeBuckets: make([]int64, 0, 1), + }, + expected: &Histogram{}, + }, + { + name: "with custom buckets", + orig: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + expected: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + }, + } + + for _, tcase := range cases { + t.Run(tcase.name, func(t *testing.T) { + hCopy := &Histogram{} + tcase.orig.CopyTo(hCopy) + + // Modify a primitive value in the original histogram. + tcase.orig.Sum++ + require.Equal(t, tcase.expected, hCopy) + assertDeepCopyHSpans(t, tcase.orig, hCopy, tcase.expected) + }) + } +} + +func assertDeepCopyHSpans(t *testing.T, orig, hCopy, expected *Histogram) { + // Do an in-place expansion of an original spans slice. 
+ orig.PositiveSpans = expandSpans(orig.PositiveSpans) + orig.PositiveSpans[len(orig.PositiveSpans)-1] = Span{1, 2} + + hCopy.PositiveSpans = expandSpans(hCopy.PositiveSpans) + expected.PositiveSpans = expandSpans(expected.PositiveSpans) + // Expand the copy spans and assert that modifying the original has not affected the copy. + require.Equal(t, expected, hCopy) +} + +func expandSpans(spans []Span) []Span { + n := len(spans) + if cap(spans) > n { + spans = spans[:n+1] + } else { + spans = append(spans, Span{}) + } + return spans } func TestHistogramCompact(t *testing.T) { @@ -784,6 +1210,86 @@ func TestHistogramCompact(t *testing.T) { NegativeBuckets: []int64{2, 3}, }, }, + { + "nothing should happen with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + }, + { + "eliminate zero offsets with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 5}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, + { + "eliminate zero length with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, + { + "eliminate multiple zero length spans with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {9, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, + { + "cut empty buckets at start or end of spans, even in the middle, with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-4, 6}, {3, 6}}, + PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, } for _, c := range cases { @@ -794,3 +1300,343 @@ func TestHistogramCompact(t *testing.T) { }) } } + +func TestHistogramValidation(t *testing.T) { + tests := map[string]struct { + h *Histogram + errMsg string + skipFloat bool + }{ + "valid histogram": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + }, + }, + "valid histogram with NaN observations that has its Count (4) higher 
than the actual total of buckets (2 + 1)": { + // This case is possible if NaN values (which do not fall into any bucket) are observed. + h: &Histogram{ + ZeroCount: 2, + Count: 4, + Sum: math.NaN(), + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }, + }, + "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)": { + h: &Histogram{ + ZeroCount: 2, + Count: 4, + Sum: 333, + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + }, + errMsg: `3 observations found in buckets, but the Count field is 4: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + "rejects histogram that has too few negative buckets": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{}, + }, + errMsg: `negative side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too few positive buckets": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{}, + }, + errMsg: `positive side: spans need 1 buckets, have 0 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too many negative buckets": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{1, 2}, + }, + errMsg: `negative side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects histogram that has too many positive buckets": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }, + errMsg: `positive side: spans need 1 buckets, have 2 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects a histogram that has a negative span with a negative offset": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1, 2}, + }, + errMsg: `negative side: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a histogram which has a positive span with a negative offset": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, + PositiveBuckets: []int64{1, 2}, + }, + errMsg: `positive side: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a histogram that has a negative bucket with a negative count": { + h: &Histogram{ + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{-1}, + }, + errMsg: `negative side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, + }, + "rejects a histogram that has a positive bucket with a negative count": { + h: &Histogram{ + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + PositiveBuckets: []int64{-1}, + }, + errMsg: `positive side: bucket number 1 has observation count of -1: histogram has a bucket whose observation count is negative`, + }, + "rejects a histogram that has a lower count than count in buckets": { + h: &Histogram{ + Count: 0, + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }, + errMsg: `2 observations found in buckets, but the Count field 
is 0: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + "rejects a histogram that doesn't count the zero bucket in its count": { + h: &Histogram{ + Count: 2, + ZeroCount: 1, + NegativeSpans: []Span{{Offset: -1, Length: 1}}, + PositiveSpans: []Span{{Offset: -1, Length: 1}}, + NegativeBuckets: []int64{1}, + PositiveBuckets: []int64{1}, + }, + errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, + skipFloat: true, + }, + "rejects an exponential histogram with custom buckets schema": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + }, + errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`, + }, + "rejects a custom buckets histogram with exponential schema": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `histogram with exponential schema must not have custom bounds`, + skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation + }, + "rejects a custom buckets histogram with zero/negative buckets": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: must have zero count of 0`, + skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation + }, + "rejects a custom buckets histogram with negative offset in first span": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: -1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a custom buckets histogram with negative offset in subsequent spans": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: -1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a custom buckets histogram with non-matching bucket counts": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom 
buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects a custom buckets histogram with too few bounds": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3}, + }, + errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`, + }, + "valid custom buckets histogram": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, + "valid custom buckets histogram with extra bounds": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8}, + }, + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + if err := tc.h.Validate(); tc.errMsg != "" { + require.EqualError(t, err, tc.errMsg) + } else { + require.NoError(t, err) + } + if tc.skipFloat { + return + } + + fh := tc.h.ToFloat(nil) + if err := fh.Validate(); tc.errMsg != "" { + require.EqualError(t, err, tc.errMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func BenchmarkHistogramValidation(b *testing.B) { + histograms := GenerateBigTestHistograms(b.N, 500) + b.ResetTimer() + for _, h := range histograms { + require.NoError(b, h.Validate()) + } +} + +func TestHistogramReduceResolution(t *testing.T) { + tcs := map[string]struct { + origin *Histogram + target *Histogram + }{ + "valid histogram": { + origin: &Histogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 3, Length: 2}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + target: &Histogram{ + Schema: -1, + PositiveSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []int64{1, 3, -2, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{1, 3, -2, 0}, + }, + }, + } + + for _, tc := range tcs { + target := tc.origin.ReduceResolution(tc.target.Schema) + require.Equal(t, tc.target, target) + // Check that receiver histogram was mutated: + require.Equal(t, tc.target, tc.origin) + } +} diff --git a/model/histogram/test_utils.go b/model/histogram/test_utils.go new file mode 100644 index 0000000000..9e9a711c29 --- /dev/null +++ b/model/histogram/test_utils.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +// GenerateBigTestHistograms generates a slice of histograms with given number of buckets each. +func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { + numSpans := numBuckets / 10 + bucketsPerSide := numBuckets / 2 + spanLength := uint32(bucketsPerSide / numSpans) + // Given all bucket deltas are 1, sum bucketsPerSide + 1. + observationCount := bucketsPerSide * (1 + bucketsPerSide) + + var histograms []*Histogram + for i := 0; i < numHistograms; i++ { + h := &Histogram{ + Count: uint64(i + observationCount), + ZeroCount: uint64(i), + ZeroThreshold: 1e-128, + Sum: 18.4 * float64(i+1), + Schema: 2, + NegativeSpans: make([]Span, numSpans), + PositiveSpans: make([]Span, numSpans), + NegativeBuckets: make([]int64, bucketsPerSide), + PositiveBuckets: make([]int64, bucketsPerSide), + } + + for j := 0; j < numSpans; j++ { + s := Span{Offset: 1, Length: spanLength} + h.NegativeSpans[j] = s + h.PositiveSpans[j] = s + } + + for j := 0; j < bucketsPerSide; j++ { + h.NegativeBuckets[j] = 1 + h.PositiveBuckets[j] = 1 + } + + histograms = append(histograms, h) + } + return histograms +} diff --git a/model/labels/labels.go b/model/labels/labels.go index 0c27e15c72..f4de7496ce 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -11,37 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !stringlabels +//go:build !stringlabels && !dedupelabels package labels import ( "bytes" - "encoding/json" - "strconv" + "slices" + "strings" "github.com/cespare/xxhash/v2" - "github.com/prometheus/common/model" - "golang.org/x/exp/slices" ) -// Well-known label names used by Prometheus components. -const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" - - labelSep = '\xfe' -) - -var seps = []byte{'\xff'} - -// Label is a key/value pair of strings. -type Label struct { - Name, Value string -} - // Labels is a sorted set of labels. Order has to be guaranteed upon // instantiation. type Labels []Label @@ -50,23 +31,6 @@ func (ls Labels) Len() int { return len(ls) } func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } -func (ls Labels) String() string { - var b bytes.Buffer - - b.WriteByte('{') - for i, l := range ls { - if i > 0 { - b.WriteByte(',') - b.WriteByte(' ') - } - b.WriteString(l.Name) - b.WriteByte('=') - b.WriteString(strconv.Quote(l.Value)) - } - b.WriteByte('}') - return b.String() -} - // Bytes returns ls as a byte slice. // It uses an byte invalid character as a separator and so should not be used for printing. func (ls Labels) Bytes(buf []byte) []byte { @@ -74,49 +38,15 @@ func (ls Labels) Bytes(buf []byte) []byte { b.WriteByte(labelSep) for i, l := range ls { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(l.Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(l.Value) } return b.Bytes() } -// MarshalJSON implements json.Marshaler. -func (ls Labels) MarshalJSON() ([]byte, error) { - return json.Marshal(ls.Map()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (ls *Labels) UnmarshalJSON(b []byte) error { - var m map[string]string - - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// MarshalYAML implements yaml.Marshaler. 
-func (ls Labels) MarshalYAML() (interface{}, error) { - return ls.Map(), nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]string - - if err := unmarshal(&m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. func (ls Labels) MatchLabels(on bool, names ...string) Labels { @@ -156,9 +86,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } @@ -176,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { i++ default: b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) i++ j++ } @@ -200,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -221,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { i++ default: if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) i++ j++ @@ -247,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { continue } if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) } return b.Bytes() @@ -317,19 +247,6 @@ func (ls Labels) WithoutEmpty() Labels { return ls } -// IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { - for _, l := range ls { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return false - } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { - return false - } - } - return true -} - // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { if len(ls) != len(o) { @@ -343,15 +260,6 @@ func Equal(ls, o Labels) bool { return true } -// Map returns a string map of the labels. -func (ls Labels) Map() map[string]string { - m := make(map[string]string, len(ls)) - for _, l := range ls { - m[l.Name] = l.Value - } - return m -} - // EmptyLabels returns n empty Labels value, for convenience. func EmptyLabels() Labels { return Labels{} @@ -362,20 +270,11 @@ func EmptyLabels() Labels { func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) set = append(set, ls...) - slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) return set } -// FromMap returns new sorted Labels from the given map. -func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) - for k, v := range m { - l = append(l, Label{Name: k, Value: v}) - } - return New(l...) -} - // FromStrings creates new labels from pairs of strings. 
func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { @@ -386,7 +285,7 @@ func FromStrings(ss ...string) Labels { res = append(res, Label{Name: ss[i], Value: ss[i+1]}) } - slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) return res } @@ -416,7 +315,8 @@ func Compare(a, b Labels) int { return len(a) - len(b) } -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +// CopyFrom copies labels from b on top of whatever was in ls previously, +// reusing memory or expanding if needed. func (ls *Labels) CopyFrom(b Labels) { (*ls) = append((*ls)[:0], b...) } @@ -443,6 +343,21 @@ func (ls Labels) Validate(f func(l Label) error) error { return nil } +// DropMetricName returns Labels with "__name__" removed. +func (ls Labels) DropMetricName() Labels { + for i, l := range ls { + if l.Name == MetricName { + if i == 0 { // Make common case fast with no allocations. + return ls[1:] + } + // Avoid modifying original Labels - use [:i:i] so that left slice would not + // have any spare capacity and append would have to allocate a new slice for the result. + return append(ls[:i:i], ls[i+1:]...) + } + } + return ls +} + // InternStrings calls intern on every string value inside ls, replacing them with what it returns. func (ls *Labels) InternStrings(intern func(string) string) { for i, l := range *ls { @@ -466,109 +381,16 @@ type Builder struct { add []Label } -// NewBuilder returns a new LabelsBuilder. -func NewBuilder(base Labels) *Builder { - b := &Builder{ - del: make([]string, 0, 5), - add: make([]Label, 0, 5), - } - b.Reset(base) - return b -} - // Reset clears all current state for the builder. func (b *Builder) Reset(base Labels) { b.base = base b.del = b.del[:0] b.add = b.add[:0] - for _, l := range b.base { + b.base.Range(func(l Label) { if l.Value == "" { b.del = append(b.del, l.Name) } - } -} - -// Del deletes the label of the given name. -func (b *Builder) Del(ns ...string) *Builder { - for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) - } - } - b.del = append(b.del, n) - } - return b -} - -// Keep removes all labels from the base except those with the given names. -func (b *Builder) Keep(ns ...string) *Builder { -Outer: - for _, l := range b.base { - for _, n := range ns { - if l.Name == n { - continue Outer - } - } - b.del = append(b.del, l.Name) - } - return b -} - -// Set the name/value pair as a label. A value of "" means delete that label. -func (b *Builder) Set(n, v string) *Builder { - if v == "" { - // Empty labels are the same as missing labels. - return b.Del(n) - } - for i, a := range b.add { - if a.Name == n { - b.add[i].Value = v - return b - } - } - b.add = append(b.add, Label{Name: n, Value: v}) - - return b -} - -func (b *Builder) Get(n string) string { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. - for _, a := range b.add { - if a.Name == n { - return a.Value - } - } - if slices.Contains(b.del, n) { - return "" - } - return b.base.Get(n) -} - -// Range calls f on each label in the Builder. -func (b *Builder) Range(f func(l Label)) { - // Stack-based arrays to avoid heap allocation in most cases. - var addStack [128]Label - var delStack [128]string - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). 
- origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) - b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { - f(l) - } }) - for _, a := range origAdd { - f(a) - } -} - -func contains(s []Label, n string) bool { - for _, a := range s { - if a.Name == n { - return true - } - } - return false } // Labels returns the labels from the builder. @@ -591,7 +413,7 @@ func (b *Builder) Labels() Labels { } if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. res = append(res, b.add...) - slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } return res } @@ -601,11 +423,32 @@ type ScratchBuilder struct { add Labels } +// SymbolTable is no-op, just for api parity with dedupelabels. +type SymbolTable struct{} + +func NewSymbolTable() *SymbolTable { return nil } + +func (t *SymbolTable) Len() int { return 0 } + // NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. func NewScratchBuilder(n int) ScratchBuilder { return ScratchBuilder{add: make([]Label, 0, n)} } +// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. +func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { + return NewBuilder(EmptyLabels()) +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. +func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { + return NewScratchBuilder(n) +} + +func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { + // no-op +} + func (b *ScratchBuilder) Reset() { b.add = b.add[:0] } @@ -616,9 +459,16 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } +// UnsafeAddBytes adds a name/value pair, using []byte instead of string. +// The '-tags stringlabels' version of this function is unsafe, hence the name. +// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: string(name), Value: string(value)}) +} + // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } // Assign is for when you already have a Labels which you want this ScratchBuilder to return. @@ -626,14 +476,14 @@ func (b *ScratchBuilder) Assign(ls Labels) { b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. } -// Return the name/value pairs added so far as a Labels object. +// Labels returns the name/value pairs added so far as a Labels object. // Note: if you want them sorted, call Sort() first. func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } -// Write the newly-built Labels out to ls. +// Overwrite the newly-built Labels out to ls. // Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { *ls = append((*ls)[:0], b.add...) 
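The three-index slice in `DropMetricName` above is the subtle part of this hunk: `ls[:i:i]` caps the capacity at `i`, so the subsequent `append` cannot write into the original backing array. A minimal standalone sketch of the same copy-on-write idea (the `dropAt` helper and plain `Label` slice are hypothetical, not part of the patch):

```go
package main

import "fmt"

// Label mirrors the name/value pair used by the labels package.
type Label struct{ Name, Value string }

// dropAt removes the element at index i without mutating ls.
func dropAt(ls []Label, i int) []Label {
	if i == 0 {
		return ls[1:] // Common case: re-slice, no allocation.
	}
	// ls[:i:i] has zero spare capacity, so append must allocate a new array.
	return append(ls[:i:i], ls[i+1:]...)
}

func main() {
	ls := []Label{{"__name__", "up"}, {"instance", "a"}, {"job", "b"}}
	out := dropAt(ls, 1)
	fmt.Println(out)        // [{__name__ up} {job b}]
	fmt.Println(ls[1].Name) // "instance" — the original is untouched
}
```

Without the capacity cap, `append(ls[:i], ls[i+1:]...)` would shift elements inside the shared backing array and corrupt the caller's view.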
diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go new file mode 100644 index 0000000000..99529a3836 --- /dev/null +++ b/model/labels/labels_common.go @@ -0,0 +1,234 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "encoding/json" + "slices" + "strconv" + "unsafe" + + "github.com/prometheus/common/model" +) + +const ( + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" + + labelSep = '\xfe' // Used at beginning of `Bytes` return. + sep = '\xff' // Used between labels in `Bytes` and `Hash`. +) + +var seps = []byte{sep} // Used with Hash, which has no WriteByte method. + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +func (ls Labels) String() string { + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + + b.WriteByte('{') + i := 0 + ls.Range(func(l Label) { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + b.WriteString(l.Name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value)) + i++ + }) + b.WriteByte('}') + return b.String() +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel { + // If the default validation scheme has been overridden with legacy mode, + // we need to call the special legacy validation checker. 
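+			// The general metric-name check below runs in either case.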
+ if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) { + return strconv.ErrSyntax + } + if !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + } + if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation { + if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + } else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string) + ls.Range(func(l Label) { + m[l.Name] = l.Value + }) + return m +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { + b.base.Range(func(l Label) { + for _, n := range ns { + if l.Name == n { + return + } + } + b.del = append(b.del, l.Name) + }) + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. +func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) 
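+	// Visit base labels that are neither deleted nor overridden, then the added labels.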
+ b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} + +func yoloString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go new file mode 100644 index 0000000000..da8a88cc15 --- /dev/null +++ b/model/labels/labels_dedupelabels.go @@ -0,0 +1,817 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build dedupelabels + +package labels + +import ( + "bytes" + "slices" + "strings" + "sync" + + "github.com/cespare/xxhash/v2" +) + +// Labels is implemented by a SymbolTable and string holding name/value +// pairs encoded as indexes into the table in varint encoding. +// Names are in alphabetical order. +type Labels struct { + syms *nameTable + data string +} + +// Split SymbolTable into the part used by Labels and the part used by Builder. Only the latter needs the map. + +// This part is used by Labels. All fields are immutable after construction. +type nameTable struct { + byNum []string // This slice header is never changed, even while we are building the symbol table. + symbolTable *SymbolTable // If we need to use it in a Builder. +} + +// SymbolTable is used to map strings into numbers so they can be packed together. +type SymbolTable struct { + mx sync.Mutex + *nameTable + nextNum int + byName map[string]int +} + +const defaultSymbolTableSize = 1024 + +func NewSymbolTable() *SymbolTable { + t := &SymbolTable{ + nameTable: &nameTable{byNum: make([]string, defaultSymbolTableSize)}, + byName: make(map[string]int, defaultSymbolTableSize), + } + t.nameTable.symbolTable = t + return t +} + +func (t *SymbolTable) Len() int { + t.mx.Lock() + defer t.mx.Unlock() + return len(t.byName) +} + +// ToNum maps a string to an integer, adding the string to the table if it is not already there. +// Note: copies the string before adding, in case the caller passed part of +// a buffer that should not be kept alive by this SymbolTable. +func (t *SymbolTable) ToNum(name string) int { + t.mx.Lock() + defer t.mx.Unlock() + return t.toNumUnlocked(name) +} + +func (t *SymbolTable) toNumUnlocked(name string) int { + if i, found := t.byName[name]; found { + return i + } + i := t.nextNum + if t.nextNum == cap(t.byNum) { + // Name table is full; copy to a new one. Don't touch the existing slice, as nameTable is immutable after construction. 
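+		// Readers holding the old *nameTable still see a consistent, read-only view.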
+ newSlice := make([]string, cap(t.byNum)*2) + copy(newSlice, t.byNum) + t.nameTable = &nameTable{byNum: newSlice, symbolTable: t} + } + name = strings.Clone(name) + t.byNum[i] = name + t.byName[name] = i + t.nextNum++ + return i +} + +func (t *SymbolTable) checkNum(name string) (int, bool) { + t.mx.Lock() + defer t.mx.Unlock() + i, bool := t.byName[name] + return i, bool +} + +// ToName maps an integer to a string. +func (t *nameTable) ToName(num int) string { + return t.byNum[num] +} + +// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes, +// because we expect most Prometheus to have more than 127 unique strings. +// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings. +func decodeVarint(data string, index int) (int, int) { + b := int(data[index]) + int(data[index+1])<<8 + index += 2 + if b < 0x8000 { + return b, index + } + return decodeVarintRest(b, data, index) +} + +func decodeVarintRest(b int, data string, index int) (int, int) { + value := int(b & 0x7FFF) + b = int(data[index]) + index++ + if b < 0x80 { + return value | (b << 15), index + } + + value |= (b & 0x7f) << 15 + b = int(data[index]) + index++ + return value | (b << 22), index +} + +func decodeString(t *nameTable, data string, index int) (string, int) { + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(data[index]) + int(data[index+1])<<8 + index += 2 + if num >= 0x8000 { + num, index = decodeVarintRest(num, data, index) + } + return t.ToName(num), index +} + +// Bytes returns ls as a byte slice. +// It uses non-printing characters and so should not be used for printing. +func (ls Labels) Bytes(buf []byte) []byte { + b := bytes.NewBuffer(buf[:0]) + for i := 0; i < len(ls.data); { + if i > 0 { + b.WriteByte(sep) + } + var name, value string + name, i = decodeString(ls.syms, ls.data, i) + value, i = decodeString(ls.syms, ls.data, i) + b.WriteString(name) + b.WriteByte(sep) + b.WriteString(value) + } + return b.Bytes() +} + +// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. +func (ls Labels) IsZero() bool { + return len(ls.data) == 0 +} + +// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. +// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +// TODO: This is only used in printing an error message +func (ls Labels) MatchLabels(on bool, names ...string) Labels { + b := NewBuilder(ls) + if on { + b.Keep(names...) + } else { + b.Del(MetricName) + b.Del(names...) + } + return b.Labels() +} + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for pos := 0; pos < len(ls.data); { + name, newPos := decodeString(ls.syms, ls.data, pos) + value, newPos := decodeString(ls.syms, ls.data, newPos) + if len(b)+len(name)+len(value)+2 >= cap(b) { + // If labels entry is 1KB+, hash the rest of them via Write(). 
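+			// Fall back to the streaming API instead of growing b past its 1KB capacity.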
+ h := xxhash.New() + _, _ = h.Write(b) + for pos < len(ls.data) { + name, pos = decodeString(ls.syms, ls.data, pos) + value, pos = decodeString(ls.syms, ls.data, pos) + _, _ = h.WriteString(name) + _, _ = h.Write(seps) + _, _ = h.WriteString(value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + pos = newPos + } + return xxhash.Sum64(b) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.syms, ls.data, i) + value, i = decodeString(ls.syms, ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.syms, ls.data, i) + value, i = decodeString(ls.syms, ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.syms, ls.data, pos) + lValue, newPos := decodeString(ls.syms, ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + if b.Len() > 1 { + b.WriteByte(sep) + } + b.WriteString(lName) + b.WriteByte(sep) + b.WriteString(lValue) + } + pos = newPos + } + return b.Bytes() +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.syms, ls.data, pos) + lValue, newPos := decodeString(ls.syms, ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + if b.Len() > 1 { + b.WriteByte(sep) + } + b.WriteString(lName) + b.WriteByte(sep) + b.WriteString(lValue) + } + pos = newPos + } + return b.Bytes() +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + return Labels{syms: ls.syms, data: strings.Clone(ls.data)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. 
+ } + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + if lName == name { + lValue, _ = decodeString(ls.syms, ls.data, i) + return lValue + } else if lName[0] > name[0] { // Stop looking if we've gone past. + break + } + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var lName string + lName, i = decodeString(ls.syms, ls.data, i) + if lName == name { + return true + } else if lName[0] > name[0] { // Stop looking if we've gone past. + break + } + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + prevNum := -1 + for i := 0; i < len(ls.data); { + var lNum int + lNum, i = decodeVarint(ls.data, i) + _, i = decodeVarint(ls.data, i) + if lNum == prevNum { + return ls.syms.ToName(lNum), true + } + prevNum = lNum + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + if ls.IsEmpty() { + return ls + } + // Idea: have a constant symbol for blank, then we don't have to look it up. + blank, ok := ls.syms.symbolTable.checkNum("") + if !ok { // Symbol table has no entry for blank - none of the values can be blank. + return ls + } + for pos := 0; pos < len(ls.data); { + _, newPos := decodeVarint(ls.data, pos) + lValue, newPos := decodeVarint(ls.data, newPos) + if lValue != blank { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. + buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeVarint(ls.data, pos) + lValue, newPos = decodeVarint(ls.data, newPos) + if lValue != blank { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{syms: ls.syms, data: yoloString(buf)} + } + return ls +} + +// Equal returns whether the two label sets are equal. +func Equal(a, b Labels) bool { + if a.syms == b.syms { + return a.data == b.data + } + + la, lb := len(a.data), len(b.data) + ia, ib := 0, 0 + for ia < la && ib < lb { + var aValue, bValue string + aValue, ia = decodeString(a.syms, a.data, ia) + bValue, ib = decodeString(b.syms, b.data, ib) + if aValue != bValue { + return false + } + } + if ia != la || ib != lb { + return false + } + return true +} + +// EmptyLabels returns an empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + +// New returns a sorted Labels from the given labels. 
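+// A fresh SymbolTable is allocated to back the result.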
+// The caller has to guarantee that all label names are unique. +// Note this function is not efficient; should not be used in performance-critical places. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + syms := NewSymbolTable() + var stackSpace [16]int + size, nums := mapLabelsToNumbers(syms, ls, stackSpace[:]) + buf := make([]byte, size) + marshalNumbersToSizedBuffer(nums, buf) + return Labels{syms: syms.nameTable, data: yoloString(buf)} +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + la, lb := len(a.data), len(b.data) + ia, ib := 0, 0 + for ia < la && ib < lb { + var aName, bName string + aName, ia = decodeString(a.syms, a.data, ia) + bName, ib = decodeString(b.syms, b.data, ib) + if aName != bName { + if aName < bName { + return -1 + } + return 1 + } + var aValue, bValue string + aValue, ia = decodeString(a.syms, a.data, ia) + bValue, ib = decodeString(b.syms, b.data, ib) + if aValue != bValue { + if aValue < bValue { + return -1 + } + return 1 + } + } + // If all labels so far were in common, the set with fewer labels comes first. + return (la - ia) - (lb - ib) +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + *ls = b // Straightforward memberwise copy is all we need. +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + _, i = decodeVarint(ls.data, i) + _, i = decodeVarint(ls.data, i) + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + lValue, i = decodeString(ls.syms, ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + lValue, i = decodeString(ls.syms, ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + // TODO: remove these calls as there is nothing to do. +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + // TODO: remove these calls as there is nothing to do. +} + +// DropMetricName returns Labels with "__name__" removed. 
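+// The returned Labels shares the receiver's symbol table.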
+func (ls Labels) DropMetricName() Labels { + for i := 0; i < len(ls.data); { + lName, i2 := decodeString(ls.syms, ls.data, i) + _, i2 = decodeVarint(ls.data, i2) + if lName == MetricName { + if i == 0 { // Make common case fast with no allocations. + ls.data = ls.data[i2:] + } else { + ls.data = ls.data[:i] + ls.data[i2:] + } + break + } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. + break + } + i = i2 + } + return ls +} + +// Builder allows modifying Labels. +type Builder struct { + syms *SymbolTable + nums []int + base Labels + del []string + add []Label +} + +// NewBuilderWithSymbolTable returns a new LabelsBuilder not based on any labels, but with the SymbolTable. +func NewBuilderWithSymbolTable(s *SymbolTable) *Builder { + return &Builder{ + syms: s, + } +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + if base.syms != nil { // If base has a symbol table, use that. + b.syms = base.syms.symbolTable + } else if b.syms == nil { // Or continue using previous symbol table in builder. + b.syms = NewSymbolTable() // Don't do this in performance-sensitive code. + } + + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + slices.Sort(b.del) + a, d, newSize := 0, 0, 0 + + newSize, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) + bufSize := len(b.base.data) + newSize + buf := make([]byte, 0, bufSize) + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.syms, b.base.data, pos) + _, pos = decodeVarint(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) // If base had a symbol-table we are using it, so we don't need to look up these symbols. + } + // We have come to the end of the base set; add any remaining labels. 
+ for ; a < len(b.add); a++ { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) + } + return Labels{syms: b.syms.nameTable, data: yoloString(buf)} +} + +func marshalNumbersToSizedBuffer(nums []int, data []byte) int { + i := len(data) + for index := len(nums) - 1; index >= 0; index-- { + i = encodeVarint(data, i, nums[index]) + } + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<15 { + return 2 + } + if x < 1<<22 { + return 3 + } + if x >= 1<<29 { + panic("Number too large to represent") + } + return 4 +} + +func encodeVarintSlow(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + data[offset] = uint8(v) + v >>= 8 + offset++ + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a value is less than 32768 +func encodeVarint(data []byte, offset, v int) int { + if v < 1<<15 { + offset -= 2 + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + return offset + } + return encodeVarintSlow(data, offset, uint64(v)) +} + +// Map all the strings in lbls to the symbol table; return the total size required to hold them and all the individual mappings. +func mapLabelsToNumbers(t *SymbolTable, lbls []Label, buf []int) (totalSize int, nums []int) { + nums = buf[:0] + t.mx.Lock() + defer t.mx.Unlock() + // we just encode name/value/name/value, without any extra tags or length bytes + for _, m := range lbls { + // strings are encoded as a single varint, the index into the symbol table. + i := t.toNumUnlocked(m.Name) + nums = append(nums, i) + totalSize += sizeVarint(uint64(i)) + i = t.toNumUnlocked(m.Value) + nums = append(nums, i) + totalSize += sizeVarint(uint64(i)) + } + return totalSize, nums +} + +func appendLabelTo(nameNum, valueNum int, buf []byte) []byte { + size := sizeVarint(uint64(nameNum)) + sizeVarint(uint64(valueNum)) + sizeRequired := len(buf) + size + if cap(buf) >= sizeRequired { + buf = buf[:sizeRequired] + } else { + bufSize := cap(buf) + // Double size of buffer each time it needs to grow, to amortise copying cost. + for bufSize < sizeRequired { + bufSize = bufSize*2 + 1 + } + newBuf := make([]byte, sizeRequired, bufSize) + copy(newBuf, buf) + buf = newBuf + } + i := sizeRequired + i = encodeVarint(buf, i, valueNum) + i = encodeVarint(buf, i, nameNum) + return buf +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + syms *SymbolTable + nums []int + add []Label + output Labels + overwriteBuffer []byte +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +// Warning: expensive; don't call in tight loops. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{syms: NewSymbolTable(), add: make([]Label, 0, n)} +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilderWithSymbolTable(s *SymbolTable, n int) ScratchBuilder { + return ScratchBuilder{syms: s, add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) SetSymbolTable(s *SymbolTable) { + b.syms = s +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] + b.output = EmptyLabels() +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. 
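+// Call Sort() before Labels() if the pairs were not added in name order.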
+func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// The values must remain live until Labels() is called. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(l Labels) { + b.output = l +} + +// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + if b.output.IsEmpty() { + var size int + size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) + buf := make([]byte, size) + marshalNumbersToSizedBuffer(b.nums, buf) + b.output = Labels{syms: b.syms.nameTable, data: yoloString(buf)} + } + return b.output +} + +// Write the newly-built Labels out to ls, reusing an internal buffer. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + var size int + size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) + if size <= cap(b.overwriteBuffer) { + b.overwriteBuffer = b.overwriteBuffer[:size] + } else { + b.overwriteBuffer = make([]byte, size) + } + marshalNumbersToSizedBuffer(b.nums, b.overwriteBuffer) + ls.syms = b.syms.nameTable + ls.data = yoloString(b.overwriteBuffer) +} diff --git a/model/labels/labels_dedupelabels_test.go b/model/labels/labels_dedupelabels_test.go new file mode 100644 index 0000000000..5ef9255c21 --- /dev/null +++ b/model/labels/labels_dedupelabels_test.go @@ -0,0 +1,50 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build dedupelabels + +package labels + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestVarint(t *testing.T) { + cases := []struct { + v int + expected []byte + }{ + {0, []byte{0, 0}}, + {1, []byte{1, 0}}, + {2, []byte{2, 0}}, + {0x7FFF, []byte{0xFF, 0x7F}}, + {0x8000, []byte{0x00, 0x80, 0x01}}, + {0x8001, []byte{0x01, 0x80, 0x01}}, + {0x3FFFFF, []byte{0xFF, 0xFF, 0x7F}}, + {0x400000, []byte{0x00, 0x80, 0x80, 0x01}}, + {0x400001, []byte{0x01, 0x80, 0x80, 0x01}}, + {0x1FFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0x7F}}, + } + var buf [16]byte + for _, c := range cases { + n := encodeVarint(buf[:], len(buf), c.v) + require.Equal(t, len(c.expected), len(buf)-n) + require.Equal(t, c.expected, buf[n:]) + got, m := decodeVarint(string(buf[:]), n) + require.Equal(t, c.v, got) + require.Equal(t, len(buf), m) + } + require.Panics(t, func() { encodeVarint(buf[:], len(buf), 1<<29) }) +} diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index 223aa6ebf7..c64bb990e0 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -16,32 +16,13 @@ package labels import ( - "bytes" - "encoding/json" - "reflect" - "strconv" + "slices" + "strings" "unsafe" "github.com/cespare/xxhash/v2" - "github.com/prometheus/common/model" - "golang.org/x/exp/slices" ) -// Well-known label names used by Prometheus components. -const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" -) - -var seps = []byte{'\xff'} - -// Label is a key/value pair of strings. -type Label struct { - Name, Value string -} - // Labels is implemented by a single flat string holding name/value pairs. // Each name and value is preceded by its length in varint encoding. // Names are in order. @@ -49,12 +30,6 @@ type Labels struct { data string } -type labelSlice []Label - -func (ls labelSlice) Len() int { return len(ls) } -func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } -func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } - func decodeSize(data string, index int) (int, int) { // Fast-path for common case of a single byte, value 0..127. b := data[index] @@ -82,26 +57,6 @@ func decodeString(data string, index int) (string, int) { return data[index : index+size], index + size } -func (ls Labels) String() string { - var b bytes.Buffer - - b.WriteByte('{') - for i := 0; i < len(ls.data); { - if i > 0 { - b.WriteByte(',') - b.WriteByte(' ') - } - var name, value string - name, i = decodeString(ls.data, i) - value, i = decodeString(ls.data, i) - b.WriteString(name) - b.WriteByte('=') - b.WriteString(strconv.Quote(value)) - } - b.WriteByte('}') - return b.String() -} - // Bytes returns ls as a byte slice. // It uses non-printing characters and so should not be used for printing. func (ls Labels) Bytes(buf []byte) []byte { @@ -114,45 +69,11 @@ func (ls Labels) Bytes(buf []byte) []byte { return buf } -// MarshalJSON implements json.Marshaler. -func (ls Labels) MarshalJSON() ([]byte, error) { - return json.Marshal(ls.Map()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (ls *Labels) UnmarshalJSON(b []byte) error { - var m map[string]string - - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// MarshalYAML implements yaml.Marshaler. 
-func (ls Labels) MarshalYAML() (interface{}, error) { - return ls.Map(), nil -} - // IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. func (ls Labels) IsZero() bool { return len(ls.data) == 0 } -// UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]string - - if err := unmarshal(&m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. // TODO: This is only used in printing an error message @@ -190,9 +111,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -216,9 +137,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -266,8 +187,7 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { // Copy returns a copy of the labels. func (ls Labels) Copy() Labels { - buf := append([]byte{}, ls.data...) - return Labels{data: yoloString(buf)} + return Labels{data: strings.Clone(ls.data)} } // Get returns the value for the label with the given name. @@ -300,13 +220,26 @@ func (ls Labels) Get(name string) string { // Has returns true if the label with the given name is present. func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. + } for i := 0; i < len(ls.data); { - var lName string - lName, i = decodeString(ls.data, i) - _, i = decodeString(ls.data, i) - if lName == name { - return true + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + return true + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return false } @@ -356,71 +289,29 @@ func (ls Labels) WithoutEmpty() Labels { return ls } -// IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { - err := ls.Validate(func(l Label) error { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return strconv.ErrSyntax - } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { - return strconv.ErrSyntax - } - return nil - }) - return err == nil -} - // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { return ls.data == o.data } -// Map returns a string map of the labels. -func (ls Labels) Map() map[string]string { - m := make(map[string]string, len(ls.data)/10) - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - m[lName] = lValue - } - return m -} - // EmptyLabels returns an empty Labels value, for convenience. 
func EmptyLabels() Labels { return Labels{} } - -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. // The caller has to guarantee that all label names are unique. func New(ls ...Label) Labels { - slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) size := labelsSize(ls) buf := make([]byte, size) marshalLabelsToSizedBuffer(ls, buf) return Labels{data: yoloString(buf)} } -// FromMap returns new sorted Labels from the given map. -func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) - for k, v := range m { - l = append(l, Label{Name: k, Value: v}) - } - return New(l...) -} - // FromStrings creates new labels from pairs of strings. func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { @@ -444,8 +335,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break @@ -464,13 +355,11 @@ func Compare(a, b Labels) int { // Now we know that there is some difference before the end of a and b. // Go back through the fields and find which field that difference is in. - firstCharDifferent := i - for i = 0; ; { - size, nextI := decodeSize(a.data, i) - if nextI+size > firstCharDifferent { - break - } + firstCharDifferent, i := i, 0 + size, nextI := decodeSize(a.data, i) + for nextI+size <= firstCharDifferent { i = nextI + size + size, nextI = decodeSize(a.data, i) } // Difference is inside this entry. aStr, _ := decodeString(a.data, i) @@ -529,14 +418,33 @@ func (ls Labels) Validate(f func(l Label) error) error { return nil } -// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +// DropMetricName returns Labels with "__name__" removed. +func (ls Labels) DropMetricName() Labels { + for i := 0; i < len(ls.data); { + lName, i2 := decodeString(ls.data, i) + size, i2 := decodeSize(ls.data, i2) + i2 += size + if lName == MetricName { + if i == 0 { // Make common case fast with no allocations. + ls.data = ls.data[i2:] + } else { + ls.data = ls.data[:i] + ls.data[i2:] + } + break + } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. + break + } + i = i2 + } + return ls +} + +// InternStrings is a no-op because it would only save when the whole set of labels is identical. func (ls *Labels) InternStrings(intern func(string) string) { - ls.data = intern(ls.data) } -// ReleaseStrings calls release on every string value inside ls. +// ReleaseStrings is a no-op for the same reason as InternStrings. func (ls Labels) ReleaseStrings(release func(string)) { - release(ls.data) } // Builder allows modifying Labels. @@ -546,115 +454,16 @@ type Builder struct { add []Label } -// NewBuilder returns a new LabelsBuilder. 
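// Editor's aside: behaviour of the DropMetricName added above, on a
// throwaway example. The receiver is a value, so the original Labels is left
// intact, and in the common case (__name__ sorts first) the result is a
// reslice of the same data with no allocation:
//
//	ls := labels.FromStrings("__name__", "up", "job", "node")
//	ls.DropMetricName() // {job="node"}
//	ls                  // still {__name__="up", job="node"}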
-func NewBuilder(base Labels) *Builder { - b := &Builder{ - del: make([]string, 0, 5), - add: make([]Label, 0, 5), - } - b.Reset(base) - return b -} - // Reset clears all current state for the builder. func (b *Builder) Reset(base Labels) { b.base = base b.del = b.del[:0] b.add = b.add[:0] - for i := 0; i < len(base.data); { - var lName, lValue string - lName, i = decodeString(base.data, i) - lValue, i = decodeString(base.data, i) - if lValue == "" { - b.del = append(b.del, lName) - } - } -} - -// Del deletes the label of the given name. -func (b *Builder) Del(ns ...string) *Builder { - for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) - } - } - b.del = append(b.del, n) - } - return b -} - -// Keep removes all labels from the base except those with the given names. -func (b *Builder) Keep(ns ...string) *Builder { -Outer: - for i := 0; i < len(b.base.data); { - var lName string - lName, i = decodeString(b.base.data, i) - _, i = decodeString(b.base.data, i) - for _, n := range ns { - if lName == n { - continue Outer - } - } - b.del = append(b.del, lName) - } - return b -} - -// Set the name/value pair as a label. A value of "" means delete that label. -func (b *Builder) Set(n, v string) *Builder { - if v == "" { - // Empty labels are the same as missing labels. - return b.Del(n) - } - for i, a := range b.add { - if a.Name == n { - b.add[i].Value = v - return b - } - } - b.add = append(b.add, Label{Name: n, Value: v}) - - return b -} - -func (b *Builder) Get(n string) string { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. - for _, a := range b.add { - if a.Name == n { - return a.Value - } - } - if slices.Contains(b.del, n) { - return "" - } - return b.base.Get(n) -} - -// Range calls f on each label in the Builder. -func (b *Builder) Range(f func(l Label)) { - // Stack-based arrays to avoid heap allocation in most cases. - var addStack [128]Label - var delStack [128]string - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). - origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { - f(l) + if l.Value == "" { + b.del = append(b.del, l.Name) } }) - for _, a := range origAdd { - f(a) - } -} - -func contains(s []Label, n string) bool { - for _, a := range s { - if a.Name == n { - return true - } - } - return false } // Labels returns the labels from the builder. @@ -664,7 +473,7 @@ func (b *Builder) Labels() Labels { return b.base } - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) slices.Sort(b.del) a, d := 0, 0 @@ -821,9 +630,15 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } +// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// The values must remain live until Labels() is called. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) +} + // Sort the labels added so far by name. 
func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } // Assign is for when you already have a Labels which you want this ScratchBuilder to return. @@ -855,3 +670,24 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) { marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer) ls.data = yoloString(b.overwriteBuffer) } + +// SymbolTable is a no-op, provided for API parity with dedupelabels. +type SymbolTable struct{} + +func NewSymbolTable() *SymbolTable { return nil } + +func (t *SymbolTable) Len() int { return 0 } + +// NewBuilderWithSymbolTable creates a Builder, for API parity with dedupelabels. +func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { + return NewBuilder(EmptyLabels()) +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for API parity with dedupelabels. +func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { + return NewScratchBuilder(n) +} + +func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { + // no-op +} diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index d91be27cbc..9208908311 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -16,33 +16,43 @@ package labels import ( "encoding/json" "fmt" + "net/http" + "strconv" "strings" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) func TestLabels_String(t *testing.T) { cases := []struct { - lables Labels + labels Labels expected string }{ { - lables: FromStrings("t1", "t1", "t2", "t2"), + labels: FromStrings("t1", "t1", "t2", "t2"), expected: "{t1=\"t1\", t2=\"t2\"}", }, { - lables: Labels{}, + labels: Labels{}, expected: "{}", }, } for _, c := range cases { - str := c.lables.String() + str := c.labels.String() require.Equal(t, c.expected, str) } } +func BenchmarkString(b *testing.B) { + ls := New(benchmarkLabels...) + for i := 0; i < b.N; i++ { + _ = ls.String() + } +} + func TestLabels_MatchLabels(t *testing.T) { labels := FromStrings( "__name__", "ALERTS", @@ -114,7 +124,7 @@ func TestLabels_MatchLabels(t *testing.T) { for i, test := range tests { got := labels.MatchLabels(test.on, test.providedNames...)
- require.Equal(t, test.expected, got, "unexpected labelset for test case %d", i) + require.True(t, Equal(test.expected, got), "unexpected labelset for test case %d", i) } } @@ -207,7 +217,7 @@ func TestLabels_WithoutEmpty(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - require.Equal(t, test.expected, test.input.WithoutEmpty()) + require.True(t, Equal(test.expected, test.input.WithoutEmpty())) }) } } @@ -263,11 +273,86 @@ func TestLabels_IsValid(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - require.Equal(t, test.expected, test.input.IsValid()) + require.Equal(t, test.expected, test.input.IsValid(model.LegacyValidation)) }) } } +func TestLabels_ValidationModes(t *testing.T) { + for _, test := range []struct { + input Labels + globalMode model.ValidationScheme + callMode model.ValidationScheme + expected bool + }{ + { + input: FromStrings( + "__name__", "test.metric", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.UTF8Validation, + expected: true, + }, + { + input: FromStrings( + "__name__", "test", + "\xc5 bad utf8", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.UTF8Validation, + expected: false, + }, + { + // Setting the common model to legacy validation and then trying to check for UTF-8 on a + // per-call basis is not supported. + input: FromStrings( + "__name__", "test.utf8.metric", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.LegacyValidation, + callMode: model.UTF8Validation, + expected: false, + }, + { + input: FromStrings( + "__name__", "test", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.LegacyValidation, + callMode: model.LegacyValidation, + expected: true, + }, + { + input: FromStrings( + "__name__", "test.utf8.metric", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.LegacyValidation, + expected: false, + }, + { + input: FromStrings( + "__name__", "test", + "host.name", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.LegacyValidation, + expected: false, + }, + } { + model.NameValidationScheme = test.globalMode + require.Equal(t, test.expected, test.input.IsValid(test.callMode)) + } +} + func TestLabels_Equal(t *testing.T) { labels := FromStrings( "aaa", "111", @@ -447,19 +532,62 @@ func TestLabels_Get(t *testing.T) { require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) } +func TestLabels_DropMetricName(t *testing.T) { + require.True(t, Equal(FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").DropMetricName())) + require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropMetricName())) + + original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222") + check := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222") + require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropMetricName())) + require.True(t, Equal(original, check)) +} + +func ScratchBuilderForBenchmark() ScratchBuilder { + // (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.) 
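// Editor's aside on the comment above: under -tags dedupelabels each name and
// value is stored as a varint reference into the symbol table, so with only a
// handful of symbols every reference would take the narrowest encoding (see
// TestVarint earlier in this patch). Pre-filling 256 throwaway entries pushes
// the real labels' references into wider encodings, keeping the benchmarks
// honest; under the default stringlabels build the loop is harmless noise.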
+ b := NewScratchBuilder(256) + for i := 0; i < 256; i++ { + b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i)) + } + b.Labels() + b.Reset() + return b +} + +func NewForBenchmark(ls ...Label) Labels { + b := ScratchBuilderForBenchmark() + for _, l := range ls { + b.Add(l.Name, l.Value) + } + b.Sort() + return b.Labels() +} + +func FromStringsForBenchmark(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + b := ScratchBuilderForBenchmark() + for i := 0; i < len(ss); i += 2 { + b.Add(ss[i], ss[i+1]) + } + b.Sort() + return b.Labels() +} + // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation // The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels. // In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here) -// name old time/op new time/op delta -// Labels_Get/with_5_labels/get_first_label 5.12ns ± 0% 14.24ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_5_labels/get_middle_label 13.5ns ± 0% 18.5ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_5_labels/get_last_label 21.9ns ± 0% 18.9ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_10_labels/get_first_label 5.11ns ± 0% 19.47ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_10_labels/get_middle_label 26.2ns ± 0% 19.3ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_10_labels/get_last_label 42.8ns ± 0% 23.4ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_30_labels/get_first_label 5.10ns ± 0% 24.63ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_30_labels/get_middle_label 75.8ns ± 0% 29.7ns ± 0% ~ (p=1.000 n=1+1) -// Labels_Get/with_30_labels/get_last_label 169ns ± 0% 29ns ± 0% ~ (p=1.000 n=1+1) +// +// name old time/op new time/op delta +// Labels_Get/with_5_labels/get_first_label 5.12ns ± 0% 14.24ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_5_labels/get_middle_label 13.5ns ± 0% 18.5ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_5_labels/get_last_label 21.9ns ± 0% 18.9ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_10_labels/get_first_label 5.11ns ± 0% 19.47ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_10_labels/get_middle_label 26.2ns ± 0% 19.3ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_10_labels/get_last_label 42.8ns ± 0% 23.4ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_30_labels/get_first_label 5.10ns ± 0% 24.63ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_30_labels/get_middle_label 75.8ns ± 0% 29.7ns ± 0% ~ (p=1.000 n=1+1) +// Labels_Get/with_30_labels/get_last_label 169ns ± 0% 29ns ± 0% ~ (p=1.000 n=1+1) func BenchmarkLabels_Get(b *testing.B) { maxLabels := 30 allLabels := make([]Label, maxLabels) @@ -468,20 +596,26 @@ func BenchmarkLabels_Get(b *testing.B) { } for _, size := range []int{5, 10, maxLabels} { b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { - labels := New(allLabels[:size]...) + labels := NewForBenchmark(allLabels[:size]...) 
for _, scenario := range []struct { desc, label string }{ - {"get first label", allLabels[0].Name}, - {"get middle label", allLabels[size/2].Name}, - {"get last label", allLabels[size-1].Name}, - {"get not-found label", "benchmark"}, + {"first label", allLabels[0].Name}, + {"middle label", allLabels[size/2].Name}, + {"last label", allLabels[size-1].Name}, + {"not-found label", "benchmark"}, } { b.Run(scenario.desc, func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = labels.Get(scenario.label) - } + b.Run("get", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = labels.Get(scenario.label) + } + }) + b.Run("has", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = labels.Has(scenario.label) + } + }) }) } }) @@ -494,23 +628,33 @@ var comparisonBenchmarkScenarios = []struct { }{ { "equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), }, { "not equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), }, { "different sizes", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStringsForBenchmark("a_label_name", "a_label_value"), }, { "lots", - FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), - FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), + FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), + FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), + }, + { + "real long equal", + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + }, + { + "real long different end", + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", 
"prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), + FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"), }, } @@ -556,6 +700,7 @@ func TestLabels_BytesWithoutLabels(t *testing.T) { } func TestBuilder(t *testing.T) { + reuseBuilder := NewBuilderWithSymbolTable(NewSymbolTable()) for i, tcase := range []struct { base Labels del []string @@ -567,6 +712,11 @@ func TestBuilder(t *testing.T) { base: FromStrings("aaa", "111"), want: FromStrings("aaa", "111"), }, + { + base: EmptyLabels(), + set: []Label{{"aaa", "444"}, {"bbb", "555"}, {"ccc", "666"}}, + want: FromStrings("aaa", "444", "bbb", "555", "ccc", "666"), + }, { base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), set: []Label{{"aaa", "444"}, {"bbb", "555"}, {"ccc", "666"}}, @@ -625,8 +775,7 @@ func TestBuilder(t *testing.T) { want: FromStrings("aaa", "111", "ddd", "444"), }, } { - t.Run(fmt.Sprint(i), func(t *testing.T) { - b := NewBuilder(tcase.base) + test := func(t *testing.T, b *Builder) { for _, lbl := range tcase.set { b.Set(lbl.Name, lbl.Value) } @@ -634,7 +783,7 @@ func TestBuilder(t *testing.T) { b.Keep(tcase.keep...) } b.Del(tcase.del...) - require.Equal(t, tcase.want, b.Labels()) + require.True(t, Equal(tcase.want, b.Labels())) // Check what happens when we call Range and mutate the builder. 
b.Range(func(l Label) { @@ -643,6 +792,18 @@ func TestBuilder(t *testing.T) { } }) require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil)) + } + t.Run(fmt.Sprintf("NewBuilder %d", i), func(t *testing.T) { + test(t, NewBuilder(tcase.base)) + }) + t.Run(fmt.Sprintf("NewSymbolTable %d", i), func(t *testing.T) { + b := NewBuilderWithSymbolTable(NewSymbolTable()) + b.Reset(tcase.base) + test(t, b) + }) + t.Run(fmt.Sprintf("reuseBuilder %d", i), func(t *testing.T) { + reuseBuilder.Reset(tcase.base) + test(t, reuseBuilder) }) } t.Run("set_after_del", func(t *testing.T) { @@ -680,22 +841,23 @@ func TestScratchBuilder(t *testing.T) { want: FromStrings("ddd", "444"), }, } { - t.Run(fmt.Sprint(i), func(t *testing.T) { - b := ScratchBuilder{} + t.Run(strconv.Itoa(i), func(t *testing.T) { + b := NewScratchBuilder(len(tcase.add)) for _, lbl := range tcase.add { b.Add(lbl.Name, lbl.Value) } b.Sort() - require.Equal(t, tcase.want, b.Labels()) + require.True(t, Equal(tcase.want, b.Labels())) b.Assign(tcase.want) - require.Equal(t, tcase.want, b.Labels()) + require.True(t, Equal(tcase.want, b.Labels())) }) } } func TestLabels_Hash(t *testing.T) { lbls := FromStrings("foo", "bar", "baz", "qux") - require.Equal(t, lbls.Hash(), lbls.Hash()) + hash1, hash2 := lbls.Hash(), lbls.Hash() + require.Equal(t, hash1, hash2) require.NotEqual(t, lbls.Hash(), FromStrings("foo", "bar").Hash(), "different labels match.") } @@ -754,24 +916,24 @@ func BenchmarkLabels_Hash(b *testing.B) { } } -func BenchmarkBuilder(b *testing.B) { - m := []Label{ - {"job", "node"}, - {"instance", "123.123.1.211:9090"}, - {"path", "/api/v1/namespaces//deployments/"}, - {"method", "GET"}, - {"namespace", "system"}, - {"status", "500"}, - {"prometheus", "prometheus-core-1"}, - {"datacenter", "eu-west-1"}, - {"pod_name", "abcdef-99999-defee"}, - } +var benchmarkLabels = []Label{ + {"job", "node"}, + {"instance", "123.123.1.211:9090"}, + {"path", "/api/v1/namespaces//deployments/"}, + {"method", http.MethodGet}, + {"namespace", "system"}, + {"status", "500"}, + {"prometheus", "prometheus-core-1"}, + {"datacenter", "eu-west-1"}, + {"pod_name", "abcdef-99999-defee"}, +} +func BenchmarkBuilder(b *testing.B) { var l Labels builder := NewBuilder(EmptyLabels()) for i := 0; i < b.N; i++ { builder.Reset(EmptyLabels()) - for _, l := range m { + for _, l := range benchmarkLabels { builder.Set(l.Name, l.Value) } l = builder.Labels() @@ -780,18 +942,7 @@ func BenchmarkBuilder(b *testing.B) { } func BenchmarkLabels_Copy(b *testing.B) { - m := map[string]string{ - "job": "node", - "instance": "123.123.1.211:9090", - "path": "/api/v1/namespaces//deployments/", - "method": "GET", - "namespace": "system", - "status": "500", - "prometheus": "prometheus-core-1", - "datacenter": "eu-west-1", - "pod_name": "abcdef-99999-defee", - } - l := FromMap(m) + l := NewForBenchmark(benchmarkLabels...) for i := 0; i < b.N; i++ { l = l.Copy() diff --git a/model/labels/matcher.go b/model/labels/matcher.go index f299c40f64..a09c838e3f 100644 --- a/model/labels/matcher.go +++ b/model/labels/matcher.go @@ -14,7 +14,8 @@ package labels import ( - "fmt" + "bytes" + "strconv" ) // MatchType is an enum for label matching types. @@ -78,7 +79,29 @@ func MustNewMatcher(mt MatchType, name, val string) *Matcher { } func (m *Matcher) String() string { - return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value) + // Start a buffer with a pre-allocated size on stack to cover most needs. 
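// Editor's aside: the pattern used below, in isolation
// (bytes.Buffer.AvailableBuffer needs Go 1.21+):
//
//	var arr [1024]byte
//	buf := bytes.NewBuffer(arr[:0])
//	buf.Write(strconv.AppendQuote(buf.AvailableBuffer(), `some "value"`))
//
// AppendQuote writes into the buffer's spare capacity, so the quoted form
// never exists as a separate allocation; the 1KB array keeps typical
// matchers entirely on the stack.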
+ var bytea [1024]byte + b := bytes.NewBuffer(bytea[:0]) + + if m.shouldQuoteName() { + b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name)) + } else { + b.WriteString(m.Name) + } + b.WriteString(m.Type.String()) + b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value)) + + return b.String() +} + +func (m *Matcher) shouldQuoteName() bool { + for i, c := range m.Name { + if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') { + continue + } + return true + } + return len(m.Name) == 0 } // Matches returns whether the matcher matches the given string value. @@ -118,3 +141,30 @@ func (m *Matcher) GetRegexString() string { } return m.re.GetRegexString() } + +// SetMatches returns a set of equality matchers for the current regex matchers if possible. +// For examples the regexp `a(b|f)` will returns "ab" and "af". +// Returns nil if we can't replace the regexp by only equality matchers. +func (m *Matcher) SetMatches() []string { + if m.re == nil { + return nil + } + return m.re.SetMatches() +} + +// Prefix returns the required prefix of the value to match, if possible. +// It will be empty if it's an equality matcher or if the prefix can't be determined. +func (m *Matcher) Prefix() string { + if m.re == nil { + return "" + } + return m.re.prefix +} + +// IsRegexOptimized returns whether regex is optimized. +func (m *Matcher) IsRegexOptimized() bool { + if m.re == nil { + return false + } + return m.re.IsOptimized() +} diff --git a/model/labels/matcher_test.go b/model/labels/matcher_test.go index d26e9329f2..ff39d40d0f 100644 --- a/model/labels/matcher_test.go +++ b/model/labels/matcher_test.go @@ -14,13 +14,15 @@ package labels import ( + "fmt" + "math/rand" "testing" "github.com/stretchr/testify/require" ) func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher { - m, err := NewMatcher(mType, "", value) + m, err := NewMatcher(mType, "test_label_name", value) require.NoError(t, err) return m } @@ -81,6 +83,21 @@ func TestMatcher(t *testing.T) { value: "foo-bar", match: false, }, + { + matcher: mustNewMatcher(t, MatchRegexp, "$*bar"), + value: "foo-bar", + match: false, + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "bar^+"), + value: "foo-bar", + match: false, + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "$+bar"), + value: "foo-bar", + match: false, + }, } for _, test := range tests { @@ -118,6 +135,82 @@ func TestInverse(t *testing.T) { } } +func TestPrefix(t *testing.T) { + for i, tc := range []struct { + matcher *Matcher + prefix string + }{ + { + matcher: mustNewMatcher(t, MatchEqual, "abc"), + prefix: "", + }, + { + matcher: mustNewMatcher(t, MatchNotEqual, "abc"), + prefix: "", + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "abc.+"), + prefix: "abc", + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "abcd|abc.+"), + prefix: "abc", + }, + { + matcher: mustNewMatcher(t, MatchNotRegexp, "abcd|abc.+"), + prefix: "abc", + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "abc(def|ghj)|ab|a."), + prefix: "a", + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "foo.+bar|foo.*baz"), + prefix: "foo", + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "abc|.*"), + prefix: "", + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "abc|def"), + prefix: "", + }, + { + matcher: mustNewMatcher(t, MatchRegexp, ".+def"), + prefix: "", + }, + } { + t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) { + require.Equal(t, tc.prefix, tc.matcher.Prefix()) + }) + } +} + +func TestIsRegexOptimized(t *testing.T) { + for i, 
tc := range []struct { + matcher *Matcher + isRegexOptimized bool + }{ + { + matcher: mustNewMatcher(t, MatchEqual, "abc"), + isRegexOptimized: false, + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "."), + isRegexOptimized: false, + }, + { + matcher: mustNewMatcher(t, MatchRegexp, "abc.+"), + isRegexOptimized: true, + }, + } { + t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) { + require.Equal(t, tc.isRegexOptimized, tc.matcher.IsRegexOptimized()) + }) + } +} + func BenchmarkMatchType_String(b *testing.B) { for i := 0; i <= b.N; i++ { _ = MatchType(i % int(MatchNotRegexp+1)).String() @@ -133,3 +226,128 @@ func BenchmarkNewMatcher(b *testing.B) { } }) } + +func BenchmarkMatcher_String(b *testing.B) { + type benchCase struct { + name string + matchers []*Matcher + } + cases := []benchCase{ + { + name: "short name equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "foo", "bar"), + MustNewMatcher(MatchEqual, "bar", "baz"), + MustNewMatcher(MatchEqual, "abc", "def"), + MustNewMatcher(MatchEqual, "ghi", "klm"), + MustNewMatcher(MatchEqual, "nop", "qrs"), + }, + }, + { + name: "short quoted name not equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "f.o", "bar"), + MustNewMatcher(MatchEqual, "b.r", "baz"), + MustNewMatcher(MatchEqual, "a.c", "def"), + MustNewMatcher(MatchEqual, "g.i", "klm"), + MustNewMatcher(MatchEqual, "n.p", "qrs"), + }, + }, + { + name: "short quoted name with quotes not equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, `"foo"`, "bar"), + MustNewMatcher(MatchEqual, `"foo"`, "baz"), + MustNewMatcher(MatchEqual, `"foo"`, "def"), + MustNewMatcher(MatchEqual, `"foo"`, "klm"), + MustNewMatcher(MatchEqual, `"foo"`, "qrs"), + }, + }, + { + name: "short name value with quotes equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "foo", `"bar"`), + MustNewMatcher(MatchEqual, "bar", `"baz"`), + MustNewMatcher(MatchEqual, "abc", `"def"`), + MustNewMatcher(MatchEqual, "ghi", `"klm"`), + MustNewMatcher(MatchEqual, "nop", `"qrs"`), + }, + }, + { + name: "short name and long value regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "foo", "five_six_seven_eight_nine_ten_one_two_three_four"), + MustNewMatcher(MatchRegexp, "bar", "one_two_three_four_five_six_seven_eight_nine_ten"), + MustNewMatcher(MatchRegexp, "abc", "two_three_four_five_six_seven_eight_nine_ten_one"), + MustNewMatcher(MatchRegexp, "ghi", "three_four_five_six_seven_eight_nine_ten_one_two"), + MustNewMatcher(MatchRegexp, "nop", "four_five_six_seven_eight_nine_ten_one_two_three"), + }, + }, + { + name: "short name and long value with quotes equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "foo", `five_six_seven_eight_nine_ten_"one"_two_three_four`), + MustNewMatcher(MatchEqual, "bar", `one_two_three_four_five_six_"seven"_eight_nine_ten`), + MustNewMatcher(MatchEqual, "abc", `two_three_four_five_six_seven_"eight"_nine_ten_one`), + MustNewMatcher(MatchEqual, "ghi", `three_four_five_six_seven_eight_"nine"_ten_one_two`), + MustNewMatcher(MatchEqual, "nop", `four_five_six_seven_eight_nine_"ten"_one_two_three`), + }, + }, + { + name: "long name regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "val"), + MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "val"), + MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "val"), + MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "val"), + 
MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "val"), + }, + }, + { + name: "long quoted name regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "val"), + MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "val"), + MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "val"), + MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "val"), + MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "val"), + }, + }, + { + name: "long name and long value regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "five_six_seven_eight_nine_ten_one_two_three_four"), + MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "one_two_three_four_five_six_seven_eight_nine_ten"), + MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "two_three_four_five_six_seven_eight_nine_ten_one"), + MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "three_four_five_six_seven_eight_nine_ten_one_two"), + MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "four_five_six_seven_eight_nine_ten_one_two_three"), + }, + }, + { + name: "long quoted name and long value regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "five.six.seven.eight.nine.ten.one.two.three.four"), + MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "one.two.three.four.five.six.seven.eight.nine.ten"), + MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "two.three.four.five.six.seven.eight.nine.ten.one"), + MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "three.four.five.six.seven.eight.nine.ten.one.two"), + MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "four.five.six.seven.eight.nine.ten.one.two.three"), + }, + }, + } + + var mixed []*Matcher + for _, bc := range cases { + mixed = append(mixed, bc.matchers...) + } + rand.Shuffle(len(mixed), func(i, j int) { mixed[i], mixed[j] = mixed[j], mixed[i] }) + cases = append(cases, benchCase{name: "mixed", matchers: mixed}) + + for _, bc := range cases { + b.Run(bc.name, func(b *testing.B) { + for i := 0; i <= b.N; i++ { + m := bc.matchers[i%len(bc.matchers)] + _ = m.String() + } + }) + } +} diff --git a/model/labels/regexp.go b/model/labels/regexp.go index 14319c7f7a..3df9435194 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -14,79 +14,358 @@ package labels import ( + "slices" "strings" + "unicode" + "unicode/utf8" "github.com/grafana/regexp" "github.com/grafana/regexp/syntax" + "golang.org/x/text/unicode/norm" +) + +const ( + maxSetMatches = 256 + + // The minimum number of alternate values a regex should have to trigger + // the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map + // to match values instead of iterating over a list. This value has + // been computed running BenchmarkOptimizeEqualStringMatchers. + minEqualMultiStringMatcherMapThreshold = 16 ) type FastRegexMatcher struct { + // Under some conditions, re is nil because the expression is never parsed. + // We store the original string to be able to return it in GetRegexString(). 
+ reString string re *regexp.Regexp - prefix string - suffix string - contains string - // shortcut for literals - literal bool - value string + setMatches []string + stringMatcher StringMatcher + prefix string + suffix string + contains []string + + // matchString is the "compiled" function to run by MatchString(). + matchString func(string) bool } func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { - if isLiteral(v) { - return &FastRegexMatcher{literal: true, value: v}, nil + m := &FastRegexMatcher{ + reString: v, } - re, err := regexp.Compile("^(?:" + v + ")$") - if err != nil { - return nil, err + + m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v) + if m.stringMatcher != nil { + // If we already have a string matcher, we don't need to parse the regex + // or compile the matchString function. This also avoids the behavior in + // compileMatchStringFunction where it prefers to use setMatches when + // available, even if the string matcher is faster. + m.matchString = m.stringMatcher.Matches + } else { + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) + if err != nil { + return nil, err + } + // Simplify the syntax tree to run faster. + parsed = parsed.Simplify() + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") + if err != nil { + return nil, err + } + if parsed.Op == syntax.OpConcat { + m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) + } + if matches, caseSensitive := findSetMatches(parsed); caseSensitive { + m.setMatches = matches + } + m.stringMatcher = stringMatcherFromRegexp(parsed) + m.matchString = m.compileMatchStringFunction() } - parsed, err := syntax.Parse(v, syntax.Perl) - if err != nil { - return nil, err + return m, nil +} + +// compileMatchStringFunction returns the function to run by MatchString(). +func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool { + // If the only optimization available is the string matcher, then we can just run it. + if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && len(m.contains) == 0 && m.stringMatcher != nil { + return m.stringMatcher.Matches } - m := &FastRegexMatcher{ - re: re, + return func(s string) bool { + if len(m.setMatches) != 0 { + for _, match := range m.setMatches { + if match == s { + return true + } + } + return false + } + if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { + return false + } + if m.suffix != "" && !strings.HasSuffix(s, m.suffix) { + return false + } + if len(m.contains) > 0 && !containsInOrder(s, m.contains) { + return false + } + if m.stringMatcher != nil { + return m.stringMatcher.Matches(s) + } + return m.re.MatchString(s) } +} - if parsed.Op == syntax.OpConcat { - m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) +// IsOptimized returns true if any fast-path optimization is applied to the +// regex matcher. +func (m *FastRegexMatcher) IsOptimized() bool { + return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || len(m.contains) > 0 +} + +// findSetMatches extract equality matches from a regexp. +// Returns nil if we can't replace the regexp by only equality matchers or the regexp contains +// a mix of case sensitive and case insensitive matchers. 
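// Editor's aside: the observable effect of the extraction below, via the
// public API (hypothetical patterns):
//
//	m, _ := NewFastRegexMatcher("api_(get|post)")
//	m.SetMatches() // ["api_get", "api_post"]
//	m, _ = NewFastRegexMatcher("(?i)get|post")
//	m.SetMatches() // nil: matches are found but reported case-insensitive
//
// NewFastRegexMatcher only keeps the result when the second return value
// reports the matches as case sensitive.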
+func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) { + clearBeginEndText(re) + + return findSetMatchesInternal(re, "") +} + +func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) { + switch re.Op { + case syntax.OpBeginText: + // Correctly handling the begin text operator inside a regex is tricky, + // so in this case we fallback to the regex engine. + return nil, false + case syntax.OpEndText: + // Correctly handling the end text operator inside a regex is tricky, + // so in this case we fallback to the regex engine. + return nil, false + case syntax.OpLiteral: + return []string{base + string(re.Rune)}, isCaseSensitive(re) + case syntax.OpEmptyMatch: + if base != "" { + return []string{base}, isCaseSensitive(re) + } + case syntax.OpAlternate: + return findSetMatchesFromAlternate(re, base) + case syntax.OpCapture: + clearCapture(re) + return findSetMatchesInternal(re, base) + case syntax.OpConcat: + return findSetMatchesFromConcat(re, base) + case syntax.OpCharClass: + if len(re.Rune)%2 != 0 { + return nil, false + } + var matches []string + var totalSet int + for i := 0; i+1 < len(re.Rune); i += 2 { + totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1 + } + // limits the total characters that can be used to create matches. + // In some case like negation [^0-9] a lot of possibilities exists and that + // can create thousands of possible matches at which points we're better off using regexp. + if totalSet > maxSetMatches { + return nil, false + } + for i := 0; i+1 < len(re.Rune); i += 2 { + lo, hi := re.Rune[i], re.Rune[i+1] + for c := lo; c <= hi; c++ { + matches = append(matches, base+string(c)) + } + } + return matches, isCaseSensitive(re) + default: + return nil, false } + return nil, false +} - return m, nil +func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) { + if len(re.Sub) == 0 { + return nil, false + } + clearCapture(re.Sub...) + + matches = []string{base} + + for i := 0; i < len(re.Sub); i++ { + var newMatches []string + for j, b := range matches { + m, caseSensitive := findSetMatchesInternal(re.Sub[i], b) + if m == nil { + return nil, false + } + if tooManyMatches(newMatches, m...) { + return nil, false + } + + // All matches must have the same case sensitivity. If it's the first set of matches + // returned, we store its sensitivity as the expected case, and then we'll check all + // other ones. + if i == 0 && j == 0 { + matchesCaseSensitive = caseSensitive + } + if matchesCaseSensitive != caseSensitive { + return nil, false + } + + newMatches = append(newMatches, m...) + } + matches = newMatches + } + + return matches, matchesCaseSensitive } -func (m *FastRegexMatcher) MatchString(s string) bool { - if m.literal { - return s == m.value +func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) { + for i, sub := range re.Sub { + found, caseSensitive := findSetMatchesInternal(sub, base) + if found == nil { + return nil, false + } + if tooManyMatches(matches, found...) { + return nil, false + } + + // All matches must have the same case sensitivity. If it's the first set of matches + // returned, we store its sensitivity as the expected case, and then we'll check all + // other ones. + if i == 0 { + matchesCaseSensitive = caseSensitive + } + if matchesCaseSensitive != caseSensitive { + return nil, false + } + + matches = append(matches, found...) 
} + return matches, matchesCaseSensitive +} + +// clearCapture removes capture operations, as they are not used for matching. +func clearCapture(regs ...*syntax.Regexp) { + for _, r := range regs { + // Iterate on the regexp because capture groups could be nested. + for r.Op == syntax.OpCapture { + *r = *r.Sub[0] + } + } +} + +// clearBeginEndText removes the begin and end text from the regexp. Prometheus regexps are anchored to the beginning and end of the string. +func clearBeginEndText(re *syntax.Regexp) { + // Do not clear begin/end text from an alternate operator because it could + // change the actual regexp properties. + if re.Op == syntax.OpAlternate { + return + } + + if len(re.Sub) == 0 { + return + } + if len(re.Sub) == 1 { + if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText { + // We need to remove this element. Since it's the only one, we convert into a matcher of an empty string. + // OpEmptyMatch is regexp's nop operator. + re.Op = syntax.OpEmptyMatch + re.Sub = nil + return + } + } + if re.Sub[0].Op == syntax.OpBeginText { + re.Sub = re.Sub[1:] + } + if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText { + re.Sub = re.Sub[:len(re.Sub)-1] + } +} + +// isCaseInsensitive tells if a regexp is case insensitive. +// The flag should be checked at each level of the syntax tree. +func isCaseInsensitive(reg *syntax.Regexp) bool { + return (reg.Flags & syntax.FoldCase) != 0 +} + +// isCaseSensitive tells if a regexp is case sensitive. +// The flag should be checked at each level of the syntax tree. +func isCaseSensitive(reg *syntax.Regexp) bool { + return !isCaseInsensitive(reg) +} + +// tooManyMatches guards against creating too many set matches. +func tooManyMatches(matches []string, added ...string) bool { + return len(matches)+len(added) > maxSetMatches +} + +func (m *FastRegexMatcher) MatchString(s string) bool { + return m.matchString(s) +} + +func (m *FastRegexMatcher) SetMatches() []string { + // IMPORTANT: always return a copy, otherwise if the caller manipulates this slice it will + // also get manipulated in the cached FastRegexMatcher instance. + return slices.Clone(m.setMatches) +} + +func (m *FastRegexMatcher) GetRegexString() string { - if m.literal { - return m.value - } - return m.re.String() + return m.reString } -func isLiteral(re string) bool { - return regexp.QuoteMeta(re) == re +// optimizeAlternatingLiterals optimizes a regex of the form +// +// `literal1|literal2|literal3|...` +// +// This function returns an optimized StringMatcher or nil if the regex +// cannot be optimized in this way, and a list of setMatches up to maxSetMatches.
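// Editor's aside: concrete cases for the optimization below (hypothetical
// patterns): "foo|bar|baz" yields a multi-string matcher with set matches
// ["foo", "bar", "baz"] without ever invoking the regexp parser; "foo|ba.*"
// returns (nil, nil) because "ba.*" is not a literal; and the empty pattern
// yields emptyStringMatcher{}.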
+func optimizeAlternatingLiterals(s string) (StringMatcher, []string) { + if len(s) == 0 { + return emptyStringMatcher{}, nil + } + + estimatedAlternates := strings.Count(s, "|") + 1 + + // If there are no alternates, check if the string is a literal + if estimatedAlternates == 1 { + if regexp.QuoteMeta(s) == s { + return &equalStringMatcher{s: s, caseSensitive: true}, []string{s} + } + return nil, nil + } + + multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0) + + for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') { + // Split the string into the next literal and the remainder + subMatch := s[:end] + s = s[end+1:] + + // break if any of the submatches are not literals + if regexp.QuoteMeta(subMatch) != subMatch { + return nil, nil + } + + multiMatcher.add(subMatch) + } + + // break if the remainder is not a literal + if regexp.QuoteMeta(s) != s { + return nil, nil + } + multiMatcher.add(s) + + return multiMatcher, multiMatcher.setMatches() } // optimizeConcatRegex returns literal prefix/suffix text that can be safely // checked against the label value before running the regexp matcher. -func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { +func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) { sub := r.Sub + clearCapture(sub...) // We can safely remove begin and end text matchers respectively // at the beginning and end of the regexp. @@ -111,15 +390,697 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { suffix = string(sub[last].Rune) } - // If contains any literal which is not a prefix/suffix, we keep the - // 1st one. We do not keep the whole list of literals to simplify the - // fast path. + // If contains any literal which is not a prefix/suffix, we keep track of + // all the ones which are case-sensitive. for i := 1; i < len(sub)-1; i++ { if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 { - contains = string(sub[i].Rune) - break + contains = append(contains, string(sub[i].Rune)) } } return } + +// StringMatcher is a matcher that matches a string in place of a regular expression. +type StringMatcher interface { + Matches(s string) bool +} + +// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher. +// It returns nil if the regexp is not supported. +func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher { + clearBeginEndText(re) + + m := stringMatcherFromRegexpInternal(re) + m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold) + + return m +} + +func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher { + clearCapture(re) + + switch re.Op { + case syntax.OpBeginText: + // Correctly handling the begin text operator inside a regex is tricky, + // so in this case we fallback to the regex engine. + return nil + case syntax.OpEndText: + // Correctly handling the end text operator inside a regex is tricky, + // so in this case we fallback to the regex engine. + return nil + case syntax.OpPlus: + if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL { + return nil + } + return &anyNonEmptyStringMatcher{ + matchNL: re.Sub[0].Op == syntax.OpAnyChar, + } + case syntax.OpStar: + if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL { + return nil + } + + // If the newline is valid, than this matcher literally match any string (even empty). 
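// Editor's aside, summarising this branch together with the OpPlus case
// above:
//
//	".*" where '.' matches newline -> trueMatcher{} (any string, "" included)
//	".*" otherwise                 -> anyStringWithoutNewlineMatcher{}
//	".+"                           -> anyNonEmptyStringMatcher{matchNL: ...}
//
// Note NewFastRegexMatcher parses with syntax.DotNL, so a plain "." is
// OpAnyChar unless the pattern opts out with (?-s:.).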
+ if re.Sub[0].Op == syntax.OpAnyChar { + return trueMatcher{} + } + + // Any string is fine (including an empty one), as far as it doesn't contain any newline. + return anyStringWithoutNewlineMatcher{} + case syntax.OpQuest: + // Only optimize for ".?". + if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) { + return nil + } + + return &zeroOrOneCharacterStringMatcher{ + matchNL: re.Sub[0].Op == syntax.OpAnyChar, + } + case syntax.OpEmptyMatch: + return emptyStringMatcher{} + + case syntax.OpLiteral: + return &equalStringMatcher{ + s: string(re.Rune), + caseSensitive: !isCaseInsensitive(re), + } + case syntax.OpAlternate: + or := make([]StringMatcher, 0, len(re.Sub)) + for _, sub := range re.Sub { + m := stringMatcherFromRegexpInternal(sub) + if m == nil { + return nil + } + or = append(or, m) + } + return orStringMatcher(or) + case syntax.OpConcat: + clearCapture(re.Sub...) + + if len(re.Sub) == 0 { + return emptyStringMatcher{} + } + if len(re.Sub) == 1 { + return stringMatcherFromRegexpInternal(re.Sub[0]) + } + + var left, right StringMatcher + + // Let's try to find if there's a first and last any matchers. + if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest { + left = stringMatcherFromRegexpInternal(re.Sub[0]) + if left == nil { + return nil + } + re.Sub = re.Sub[1:] + } + if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest { + right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1]) + if right == nil { + return nil + } + re.Sub = re.Sub[:len(re.Sub)-1] + } + + matches, matchesCaseSensitive := findSetMatchesInternal(re, "") + + if len(matches) == 0 && len(re.Sub) == 2 { + // We have not find fixed set matches. We look for other known cases that + // we can optimize. + switch { + // Prefix is literal. + case right == nil && re.Sub[0].Op == syntax.OpLiteral: + right = stringMatcherFromRegexpInternal(re.Sub[1]) + if right != nil { + matches = []string{string(re.Sub[0].Rune)} + matchesCaseSensitive = !isCaseInsensitive(re.Sub[0]) + } + + // Suffix is literal. + case left == nil && re.Sub[1].Op == syntax.OpLiteral: + left = stringMatcherFromRegexpInternal(re.Sub[0]) + if left != nil { + matches = []string{string(re.Sub[1].Rune)} + matchesCaseSensitive = !isCaseInsensitive(re.Sub[1]) + } + } + } + + // Ensure we've found some literals to match (optionally with a left and/or right matcher). + // If not, then this optimization doesn't trigger. + if len(matches) == 0 { + return nil + } + + // Use the right (and best) matcher based on what we've found. + switch { + // No left and right matchers (only fixed set matches). + case left == nil && right == nil: + // if there's no any matchers on both side it's a concat of literals + or := make([]StringMatcher, 0, len(matches)) + for _, match := range matches { + or = append(or, &equalStringMatcher{ + s: match, + caseSensitive: matchesCaseSensitive, + }) + } + return orStringMatcher(or) + + // Right matcher with 1 fixed set match. + case left == nil && len(matches) == 1: + return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right) + + // Left matcher with 1 fixed set match. + case right == nil && len(matches) == 1: + return &literalSuffixStringMatcher{ + left: left, + suffix: matches[0], + suffixCaseSensitive: matchesCaseSensitive, + } + + // We found literals in the middle. 
We can trigger the fast path only if + // the matches are case sensitive because containsStringMatcher doesn't + // support case insensitive. + case matchesCaseSensitive: + return &containsStringMatcher{ + substrings: matches, + left: left, + right: right, + } + } + } + return nil +} + +// containsStringMatcher matches a string if it contains any of the substrings. +// If left and right are not nil, it's a contains operation where left and right must match. +// If left is nil, it's a hasPrefix operation and right must match. +// Finally, if right is nil it's a hasSuffix operation and left must match. +type containsStringMatcher struct { + // The matcher that must match the left side. Can be nil. + left StringMatcher + + // At least one of these strings must match in the "middle", between left and right matchers. + substrings []string + + // The matcher that must match the right side. Can be nil. + right StringMatcher +} + +func (m *containsStringMatcher) Matches(s string) bool { + for _, substr := range m.substrings { + switch { + case m.right != nil && m.left != nil: + searchStartPos := 0 + + for { + pos := strings.Index(s[searchStartPos:], substr) + if pos < 0 { + break + } + + // Since we started searching from searchStartPos, we have to add that offset + // to get the actual position of the substring inside the text. + pos += searchStartPos + + // If both the left and right matchers match, then we can stop searching because + // we've found a match. + if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) { + return true + } + + // Continue searching for another occurrence of the substring inside the text. + searchStartPos = pos + 1 + } + case m.left != nil: + // If we have to check for characters on the left then we need to match a suffix. + if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) { + return true + } + case m.right != nil: + if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) { + return true + } + } + } + return false +} + +func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher { + if prefixCaseSensitive { + return &literalPrefixSensitiveStringMatcher{ + prefix: prefix, + right: right, + } + } + + return &literalPrefixInsensitiveStringMatcher{ + prefix: prefix, + right: right, + } +} + +// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher. +type literalPrefixSensitiveStringMatcher struct { + prefix string + + // The matcher that must match the right side. Can be nil. + right StringMatcher +} + +func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool { + if !strings.HasPrefix(s, m.prefix) { + return false + } + + // Ensure the right side matches. + return m.right.Matches(s[len(m.prefix):]) +} + +// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher. +type literalPrefixInsensitiveStringMatcher struct { + prefix string + + // The matcher that must match the right side. Can be nil. + right StringMatcher +} + +func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool { + if !hasPrefixCaseInsensitive(s, m.prefix) { + return false + } + + // Ensure the right side matches. + return m.right.Matches(s[len(m.prefix):]) +} + +// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher. +type literalSuffixStringMatcher struct { + // The matcher that must match the left side. 
+
+// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher.
+type literalSuffixStringMatcher struct {
+	// The matcher that must match the left side. Can be nil.
+	left StringMatcher
+
+	suffix              string
+	suffixCaseSensitive bool
+}
+
+func (m *literalSuffixStringMatcher) Matches(s string) bool {
+	// Ensure the suffix matches.
+	if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) {
+		return false
+	}
+	if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) {
+		return false
+	}
+
+	// Ensure the left side matches.
+	return m.left.Matches(s[:len(s)-len(m.suffix)])
+}
+
+// emptyStringMatcher matches an empty string.
+type emptyStringMatcher struct{}
+
+func (m emptyStringMatcher) Matches(s string) bool {
+	return len(s) == 0
+}
+
+// orStringMatcher matches a string if any of its sub-matchers match.
+type orStringMatcher []StringMatcher
+
+func (m orStringMatcher) Matches(s string) bool {
+	for _, matcher := range m {
+		if matcher.Matches(s) {
+			return true
+		}
+	}
+	return false
+}
+
+// equalStringMatcher matches a string exactly, and supports case-insensitive matching too.
+type equalStringMatcher struct {
+	s             string
+	caseSensitive bool
+}
+
+func (m *equalStringMatcher) Matches(s string) bool {
+	if m.caseSensitive {
+		return m.s == s
+	}
+	return strings.EqualFold(m.s, s)
+}
+
+type multiStringMatcherBuilder interface {
+	StringMatcher
+	add(s string)
+	addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher)
+	setMatches() []string
+}
+
+func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder {
+	// If the estimated size is low enough, it's faster to use a slice instead of a map.
+	if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 {
+		return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
+	}
+
+	return &equalMultiStringMapMatcher{
+		values:        make(map[string]struct{}, estimatedSize),
+		prefixes:      make(map[string][]StringMatcher, estimatedPrefixes),
+		minPrefixLen:  minPrefixLength,
+		caseSensitive: caseSensitive,
+	}
+}
+
+// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values.
+type equalMultiStringSliceMatcher struct {
+	values []string
+
+	caseSensitive bool
+}
+
+func (m *equalMultiStringSliceMatcher) add(s string) {
+	m.values = append(m.values, s)
+}
+
+func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
+	panic("not implemented")
+}
+
+func (m *equalMultiStringSliceMatcher) setMatches() []string {
+	return m.values
+}
+
+func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
+	if m.caseSensitive {
+		for _, v := range m.values {
+			if s == v {
+				return true
+			}
+		}
+	} else {
+		for _, v := range m.values {
+			if strings.EqualFold(s, v) {
+				return true
+			}
+		}
+	}
+	return false
+}
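+
+// Illustrative sketch (not part of this patch): with the default threshold,
+// small alternations stay on the linear-scan slice matcher above, while large
+// ones (or any with prefixes) use the map-backed matcher defined below. Either
+// way the observable behaviour is the same:
+//
+//	m := newEqualMultiStringMatcher(true, 2, 0, 0) // 2 estimated values, no prefixes
+//	m.add("foo")
+//	m.add("bar")
+//	m.Matches("bar") // true
+//	m.Matches("BAR") // false, this matcher is case sensitive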
+
+// equalMultiStringMapMatcher matches a string exactly against a map of valid values
+// or against a set of prefix matchers.
+type equalMultiStringMapMatcher struct {
+	// values contains values to match a string against. If the matching is case insensitive,
+	// the values here must be lowercase.
+	values map[string]struct{}
+	// prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string.
+	// If the matching is case insensitive, prefixes are all lowercase.
+	prefixes map[string][]StringMatcher
+	// minPrefixLen can be zero, meaning there are no prefix matchers.
+	minPrefixLen  int
+	caseSensitive bool
+}
+
+func (m *equalMultiStringMapMatcher) add(s string) {
+	if !m.caseSensitive {
+		s = toNormalisedLower(s)
+	}
+
+	m.values[s] = struct{}{}
+}
+
+func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) {
+	if m.minPrefixLen == 0 {
+		panic("addPrefix called when no prefix length defined")
+	}
+	if len(prefix) < m.minPrefixLen {
+		panic("addPrefix called with a too short prefix")
+	}
+	if m.caseSensitive != prefixCaseSensitive {
+		panic("addPrefix called with a prefix whose case sensitivity is different than the expected one")
+	}
+
+	s := prefix[:m.minPrefixLen]
+	if !m.caseSensitive {
+		s = strings.ToLower(s)
+	}
+
+	m.prefixes[s] = append(m.prefixes[s], matcher)
+}
+
+func (m *equalMultiStringMapMatcher) setMatches() []string {
+	if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 {
+		return nil
+	}
+
+	matches := make([]string, 0, len(m.values))
+	for s := range m.values {
+		matches = append(matches, s)
+	}
+	return matches
+}
+
+func (m *equalMultiStringMapMatcher) Matches(s string) bool {
+	if !m.caseSensitive {
+		s = toNormalisedLower(s)
+	}
+
+	if _, ok := m.values[s]; ok {
+		return true
+	}
+	if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
+		for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
+			if matcher.Matches(s) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// toNormalisedLower normalises the input string using Unicode Normalization Form KD (norm.NFKD)
+// and then converts it to lower case.
+func toNormalisedLower(s string) string {
+	var buf []byte
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= utf8.RuneSelf {
+			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
+		}
+		if 'A' <= c && c <= 'Z' {
+			if buf == nil {
+				buf = []byte(s)
+			}
+			buf[i] = c + 'a' - 'A'
+		}
+	}
+	if buf == nil {
+		return s
+	}
+	return yoloString(buf)
+}
+
+// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
+// (including an empty one) as long as it doesn't contain any newline character.
+type anyStringWithoutNewlineMatcher struct{}
+
+func (m anyStringWithoutNewlineMatcher) Matches(s string) bool {
+	// We need to make sure it doesn't contain a newline. Since the newline is
+	// an ASCII character, we can use strings.IndexByte().
+	return strings.IndexByte(s, '\n') == -1
+}
+
+// anyNonEmptyStringMatcher is a stringMatcher which matches any non-empty string.
+type anyNonEmptyStringMatcher struct {
+	matchNL bool
+}
+
+func (m *anyNonEmptyStringMatcher) Matches(s string) bool {
+	if m.matchNL {
+		// It's OK if the string contains a newline, so we just need to make
+		// sure it's non-empty.
+		return len(s) > 0
+	}
+
+	// We need to make sure it's non-empty and doesn't contain a newline.
+	// Since the newline is an ASCII character, we can use strings.IndexByte().
+	return len(s) > 0 && strings.IndexByte(s, '\n') == -1
+}
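+
+// Worked examples for toNormalisedLower (values drawn from this patch's tests):
+//
+//	toNormalisedLower("FOO") // "foo", via the allocation-light ASCII path
+//	toNormalisedLower("ſſS") // "sss", via the NFKD fallback that folds the long s (ſ)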
+
+// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence
+// of any character. The newline character is matched only if matchNL is set to true.
+type zeroOrOneCharacterStringMatcher struct {
+	matchNL bool
+}
+
+func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
+	// If there's more than one rune in the string, then it can't match.
+	if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError {
+		// Size is 0 for empty strings, 1 for invalid runes.
+		// An empty string matches; an invalid rune matches if there isn't anything else.
+		return size == len(s)
+	} else if size < len(s) {
+		return false
+	}
+
+	// No need to check for the newline if the string is empty or matching a newline is OK.
+	if m.matchNL || len(s) == 0 {
+		return true
+	}
+
+	return s[0] != '\n'
+}
+
+// trueMatcher is a stringMatcher which matches any string (it always returns true).
+type trueMatcher struct{}
+
+func (m trueMatcher) Matches(_ string) bool {
+	return true
+}
+
+// optimizeEqualOrPrefixStringMatchers optimizes the specific case where the input is an
+// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or
+// matched by a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher).
+//
+// In this specific case, when we have many strings to match against, we can use a map instead
+// of iterating over the list of strings.
+func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher {
+	var (
+		caseSensitive    bool
+		caseSensitiveSet bool
+		numValues        int
+		numPrefixes      int
+		minPrefixLength  int
+	)
+
+	// Analyse the input StringMatcher to count the number of occurrences
+	// and ensure all of them have the same case sensitivity.
+	analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool {
+		// Ensure we don't have mixed case sensitivity.
+		if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
+			return false
+		} else if !caseSensitiveSet {
+			caseSensitive = matcher.caseSensitive
+			caseSensitiveSet = true
+		}
+
+		numValues++
+		return true
+	}
+
+	analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+		// Ensure we don't have mixed case sensitivity.
+		if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
+			return false
+		} else if !caseSensitiveSet {
+			caseSensitive = prefixCaseSensitive
+			caseSensitiveSet = true
+		}
+		if numPrefixes == 0 || len(prefix) < minPrefixLength {
+			minPrefixLength = len(prefix)
+		}
+
+		numPrefixes++
+		return true
+	}
+
+	if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) {
+		return input
+	}
+
+	// If the number of values and prefixes found is less than the threshold, then we should skip the optimization.
+	if (numValues + numPrefixes) < threshold {
+		return input
+	}
+
+	// Walk the input StringMatcher again, this time extracting and storing all values.
+	// We can skip the case sensitivity check because we've already checked it, and
+	// if the code reaches this point then it means all matchers have the same case sensitivity.
+	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
+
+	// Ignore the return value because we already iterated over the input StringMatcher
+	// and it was all good.
+	findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
+		multiMatcher.add(matcher.s)
+		return true
+	}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+		multiMatcher.addPrefix(prefix, caseSensitive, matcher)
+		return true
+	})
+
+	return multiMatcher
+}
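+
+// Illustrative sketch (not part of this patch): an alternation of equality
+// matchers collapses into a single multi-string matcher once the threshold
+// is reached.
+//
+//	or := orStringMatcher{
+//		&equalStringMatcher{s: "foo", caseSensitive: true},
+//		&equalStringMatcher{s: "bar", caseSensitive: true},
+//		&equalStringMatcher{s: "baz", caseSensitive: true},
+//	}
+//	m := optimizeEqualOrPrefixStringMatchers(or, 3)
+//	m.Matches("baz") // true, served by one combined matcher instead of three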
+
+// findEqualOrPrefixStringMatchers analyzes the input StringMatcher and calls equalMatcherCallback for each
+// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
+//
+// Returns true if and only if the input StringMatcher is *only* composed of an alternation of equalStringMatcher and/or
+// literal prefix matchers. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
+func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
+	orInput, ok := input.(orStringMatcher)
+	if !ok {
+		return false
+	}
+
+	for _, m := range orInput {
+		switch casted := m.(type) {
+		case orStringMatcher:
+			if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
+				return false
+			}
+
+		case *equalStringMatcher:
+			if !equalMatcherCallback(casted) {
+				return false
+			}
+
+		case *literalPrefixSensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
+				return false
+			}
+
+		case *literalPrefixInsensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
+				return false
+			}
+
+		default:
+			// It's not an equal or prefix string matcher, so we have to stop searching
+			// because this optimization can't be applied.
+			return false
+		}
+	}
+
+	return true
+}
+
+func hasPrefixCaseInsensitive(s, prefix string) bool {
+	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
+
+func hasSuffixCaseInsensitive(s, suffix string) bool {
+	return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
+}
+
+func containsInOrder(s string, contains []string) bool {
+	// Optimization for the case where we only have to look for one substring.
+	if len(contains) == 1 {
+		return strings.Contains(s, contains[0])
+	}
+
+	return containsInOrderMulti(s, contains)
+}
+
+func containsInOrderMulti(s string, contains []string) bool {
+	offset := 0
+
+	for _, substr := range contains {
+		at := strings.Index(s[offset:], substr)
+		if at == -1 {
+			return false
+		}
+
+		offset += at + len(substr)
+	}
+
+	return true
+}
diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go
index 3188f7cefc..8df0dbb023 100644
--- a/model/labels/regexp_test.go
+++ b/model/labels/regexp_test.go
@@ -14,50 +14,126 @@ package labels
 
 import (
+	"fmt"
+	"math/rand"
 	"strings"
 	"testing"
+	"time"
+	"unicode/utf8"
 
+	"github.com/grafana/regexp"
 	"github.com/grafana/regexp/syntax"
 	"github.com/stretchr/testify/require"
 )
 
-func TestNewFastRegexMatcher(t *testing.T) {
-	cases := []struct {
-		regex    string
-		value    string
-		expected bool
-	}{
-		{regex: "(foo|bar)", value: "foo", expected: true},
-		{regex: "(foo|bar)", value: "foo bar", expected: false},
-		{regex: "(foo|bar)", value: "bar", expected: true},
-		{regex: "foo.*", value: "foo bar", expected: true},
-		{regex: "foo.*", value: "bar foo", expected: false},
-		{regex: ".*foo", value: "foo bar", expected: false},
-		{regex: ".*foo", value: "bar foo", expected: true},
-		{regex: ".*foo", value: "foo", expected: true},
-		{regex: "^.*foo$", value: "foo", expected: true},
-		{regex: "^.+foo$", value: "foo", expected: false},
-		{regex: "^.+foo$", value: "bfoo", expected: true},
-		{regex: ".*", value: "\n", expected: false},
-		{regex: ".*", value: "\nfoo", expected: false},
-		{regex: ".*foo", value: "\nfoo", expected: false},
-		{regex: "foo.*", value: "foo\n", expected: false},
-		{regex: "foo\n.*", value: "foo\n", expected: true},
-		{regex: ".*foo.*", value: "foo", expected: true},
-		{regex: ".*foo.*", value: "foo bar", expected: true},
-		{regex: ".*foo.*", value: "hello foo world", expected: true},
-		{regex: ".*foo.*", value: "hello foo\n world", expected: false},
-		{regex: 
".*foo\n.*", value: "hello foo\n world", expected: true}, - {regex: ".*", value: "foo", expected: true}, - {regex: "", value: "foo", expected: false}, - {regex: "", value: "", expected: true}, +var ( + asciiRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_") + regexes = []string{ + "", + "foo", + "^foo", + "(foo|bar)", + "foo.*", + ".*foo", + "^.*foo$", + "^.+foo$", + ".?", + ".*", + ".+", + "foo.+", + ".+foo", + "foo\n.+", + "foo\n.*", + ".*foo.*", + ".+foo.+", + "(?s:.*)", + "(?s:.+)", + "(?s:^.*foo$)", + "(?i:foo)", + "(?i:(foo|bar))", + "(?i:(foo1|foo2|bar))", + "^(?i:foo|oo)|(bar)$", + "(?i:(foo1|foo2|aaa|bbb|ccc|ddd|eee|fff|ggg|hhh|iii|lll|mmm|nnn|ooo|ppp|qqq|rrr|sss|ttt|uuu|vvv|www|xxx|yyy|zzz))", + "((.*)(bar|b|buzz)(.+)|foo)$", + "^$", + "(prometheus|api_prom)_api_v1_.+", + "10\\.0\\.(1|2)\\.+", + "10\\.0\\.(1|2).+", + "((fo(bar))|.+foo)", + // A long case sensitive alternation. + "zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb", + // An extremely long case sensitive alternation. This is a special + // case because the values share common prefixes rather than being + // entirely random. This is common in the real world. For example, the + // values of a label like kubernetes pod will often include the + // deployment name as a prefix. 
+ "jyyfj00j0061|jyyfj00j0062|jyyfj94j0093|jyyfj99j0093|jyyfm01j0021|jyyfm02j0021|jyefj00j0192|jyefj00j0193|jyefj00j0194|jyefj00j0195|jyefj00j0196|jyefj00j0197|jyefj00j0290|jyefj00j0291|jyefj00j0292|jyefj00j0293|jyefj00j0294|jyefj00j0295|jyefj00j0296|jyefj00j0297|jyefj89j0394|jyefj90j0394|jyefj91j0394|jyefj95j0347|jyefj96j0322|jyefj96j0347|jyefj97j0322|jyefj97j0347|jyefj98j0322|jyefj98j0347|jyefj99j0320|jyefj99j0322|jyefj99j0323|jyefj99j0335|jyefj99j0336|jyefj99j0344|jyefj99j0347|jyefj99j0349|jyefj99j0351|jyeff00j0117|lyyfm01j0025|lyyfm01j0028|lyyfm01j0041|lyyfm01j0133|lyyfm01j0701|lyyfm02j0025|lyyfm02j0028|lyyfm02j0041|lyyfm02j0133|lyyfm02j0701|lyyfm03j0701|lyefj00j0775|lyefj00j0776|lyefj00j0777|lyefj00j0778|lyefj00j0779|lyefj00j0780|lyefj00j0781|lyefj00j0782|lyefj50j3807|lyefj50j3852|lyefj51j3807|lyefj51j3852|lyefj52j3807|lyefj52j3852|lyefj53j3807|lyefj53j3852|lyefj54j3807|lyefj54j3852|lyefj54j3886|lyefj55j3807|lyefj55j3852|lyefj55j3886|lyefj56j3807|lyefj56j3852|lyefj56j3886|lyefj57j3807|lyefj57j3852|lyefj57j3886|lyefj58j3807|lyefj58j3852|lyefj58j3886|lyefj59j3807|lyefj59j3852|lyefj59j3886|lyefj60j3807|lyefj60j3852|lyefj60j3886|lyefj61j3807|lyefj61j3852|lyefj61j3886|lyefj62j3807|lyefj62j3852|lyefj62j3886|lyefj63j3807|lyefj63j3852|lyefj63j3886|lyefj64j3807|lyefj64j3852|lyefj64j3886|lyefj65j3807|lyefj65j3852|lyefj65j3886|lyefj66j3807|lyefj66j3852|lyefj66j3886|lyefj67j3807|lyefj67j3852|lyefj67j3886|lyefj68j3807|lyefj68j3852|lyefj68j3886|lyefj69j3807|lyefj69j3846|lyefj69j3852|lyefj69j3886|lyefj70j3807|lyefj70j3846|lyefj70j3852|lyefj70j3886|lyefj71j3807|lyefj71j3846|lyefj71j3852|lyefj71j3886|lyefj72j3807|lyefj72j3846|lyefj72j3852|lyefj72j3886|lyefj73j3807|lyefj73j3846|lyefj73j3852|lyefj73j3886|lyefj74j3807|lyefj74j3846|lyefj74j3852|lyefj74j3886|lyefj75j3807|lyefj75j3808|lyefj75j3846|lyefj75j3852|lyefj75j3886|lyefj76j3732|lyefj76j3807|lyefj76j3808|lyefj76j3846|lyefj76j3852|lyefj76j3886|lyefj77j3732|lyefj77j3807|lyefj77j3808|lyefj77j3846|lyefj77j3852|lyefj77j3886|lyefj78j3278|lyefj78j3732|lyefj78j3807|lyefj78j3808|lyefj78j3846|lyefj78j3852|lyefj78j3886|lyefj79j3732|lyefj79j3807|lyefj79j3808|lyefj79j3846|lyefj79j3852|lyefj79j3886|lyefj80j3732|lyefj80j3807|lyefj80j3808|lyefj80j3846|lyefj80j3852|lyefj80j3886|lyefj81j3732|lyefj81j3807|lyefj81j3808|lyefj81j3846|lyefj81j3852|lyefj81j3886|lyefj82j3732|lyefj82j3807|lyefj82j3808|lyefj82j3846|lyefj82j3852|lyefj82j3886|lyefj83j3732|lyefj83j3807|lyefj83j3808|lyefj83j3846|lyefj83j3852|lyefj83j3886|lyefj84j3732|lyefj84j3807|lyefj84j3808|lyefj84j3846|lyefj84j3852|lyefj84j3886|lyefj85j3732|lyefj85j3807|lyefj85j3808|lyefj85j3846|lyefj85j3852|lyefj85j3886|lyefj86j3278|lyefj86j3732|lyefj86j3807|lyefj86j3808|lyefj86j3846|lyefj86j3852|lyefj86j3886|lyefj87j3278|lyefj87j3732|lyefj87j3807|lyefj87j3808|lyefj87j3846|lyefj87j3852|lyefj87j3886|lyefj88j3732|lyefj88j3807|lyefj88j3808|lyefj88j3846|lyefj88j3852|lyefj88j3886|lyefj89j3732|lyefj89j3807|lyefj89j3808|lyefj89j3846|lyefj89j3852|lyefj89j3886|lyefj90j3732|lyefj90j3807|lyefj90j3808|lyefj90j3846|lyefj90j3852|lyefj90j3886|lyefj91j3732|lyefj91j3807|lyefj91j3808|lyefj91j3846|lyefj91j3852|lyefj91j3886|lyefj92j3732|lyefj92j3807|lyefj92j3808|lyefj92j3846|lyefj92j3852|lyefj92j3886|lyefj93j3732|lyefj93j3807|lyefj93j3808|lyefj93j3846|lyefj93j3852|lyefj93j3885|lyefj93j3886|lyefj94j3525|lyefj94j3732|lyefj94j3807|lyefj94j3808|lyefj94j3846|lyefj94j3852|lyefj94j3885|lyefj94j3886|lyefj95j3525|lyefj95j3732|lyefj95j3807|lyefj95j3808|lyefj95j3846|lyefj95j3852|lyefj95j3886|lyefj96j3732|lyefj96j3803|lyefj96j3807|lyefj96j3808|lyefj96j3846|lye
fj96j3852|lyefj96j3886|lyefj97j3333|lyefj97j3732|lyefj97j3792|lyefj97j3803|lyefj97j3807|lyefj97j3808|lyefj97j3838|lyefj97j3843|lyefj97j3846|lyefj97j3852|lyefj97j3886|lyefj98j3083|lyefj98j3333|lyefj98j3732|lyefj98j3807|lyefj98j3808|lyefj98j3838|lyefj98j3843|lyefj98j3846|lyefj98j3852|lyefj98j3873|lyefj98j3877|lyefj98j3882|lyefj98j3886|lyefj99j2984|lyefj99j3083|lyefj99j3333|lyefj99j3732|lyefj99j3807|lyefj99j3808|lyefj99j3846|lyefj99j3849|lyefj99j3852|lyefj99j3873|lyefj99j3877|lyefj99j3882|lyefj99j3884|lyefj99j3886|lyeff00j0106|lyeff00j0107|lyeff00j0108|lyeff00j0129|lyeff00j0130|lyeff00j0131|lyeff00j0132|lyeff00j0133|lyeff00j0134|lyeff00j0444|lyeff00j0445|lyeff91j0473|lyeff92j0473|lyeff92j3877|lyeff93j3877|lyeff94j0501|lyeff94j3525|lyeff94j3877|lyeff95j0501|lyeff95j3525|lyeff95j3877|lyeff96j0503|lyeff96j3877|lyeff97j3877|lyeff98j3333|lyeff98j3877|lyeff99j2984|lyeff99j3333|lyeff99j3877|mfyr9149ej|mfyr9149ek|mfyr9156ej|mfyr9156ek|mfyr9157ej|mfyr9157ek|mfyr9159ej|mfyr9159ek|mfyr9203ej|mfyr9204ej|mfyr9205ej|mfyr9206ej|mfyr9207ej|mfyr9207ek|mfyr9217ej|mfyr9217ek|mfyr9222ej|mfyr9222ek|mfyu0185ej|mfye9187ej|mfye9187ek|mfye9188ej|mfye9188ek|mfye9189ej|mfye9189ek|mfyf0185ej|oyefj87j0007|oyefj88j0007|oyefj89j0007|oyefj90j0007|oyefj91j0007|oyefj95j0001|oyefj96j0001|oyefj98j0004|oyefj99j0004|oyeff91j0004|oyeff92j0004|oyeff93j0004|oyeff94j0004|oyeff95j0004|oyeff96j0004|rklvyaxmany|ryefj93j0001|ryefj94j0001|tyyfj00a0001|tyyfj84j0005|tyyfj85j0005|tyyfj86j0005|tyyfj87j0005|tyyfj88j0005|tyyfj89j0005|tyyfj90j0005|tyyfj91j0005|tyyfj92j0005|tyyfj93j0005|tyyfj94j0005|tyyfj95j0005|tyyfj96j0005|tyyfj97j0005|tyyfj98j0005|tyyfj99j0005|tyefj50j0015|tyefj50j0017|tyefj50j0019|tyefj50j0020|tyefj50j0021|tyefj51j0015|tyefj51j0017|tyefj51j0019|tyefj51j0020|tyefj51j0021|tyefj52j0015|tyefj52j0017|tyefj52j0019|tyefj52j0020|tyefj52j0021|tyefj53j0015|tyefj53j0017|tyefj53j0019|tyefj53j0020|tyefj53j0021|tyefj54j0015|tyefj54j0017|tyefj54j0019|tyefj54j0020|tyefj54j0021|tyefj55j0015|tyefj55j0017|tyefj55j0019|tyefj55j0020|tyefj55j0021|tyefj56j0015|tyefj56j0017|tyefj56j0019|tyefj56j0020|tyefj56j0021|tyefj57j0015|tyefj57j0017|tyefj57j0019|tyefj57j0020|tyefj57j0021|tyefj58j0015|tyefj58j0017|tyefj58j0019|tyefj58j0020|tyefj58j0021|tyefj59j0015|tyefj59j0017|tyefj59j0019|tyefj59j0020|tyefj59j0021|tyefj60j0015|tyefj60j0017|tyefj60j0019|tyefj60j0020|tyefj60j0021|tyefj61j0015|tyefj61j0017|tyefj61j0019|tyefj61j0020|tyefj61j0021|tyefj62j0015|tyefj62j0017|tyefj62j0019|tyefj62j0020|tyefj62j0021|tyefj63j0015|tyefj63j0017|tyefj63j0019|tyefj63j0020|tyefj63j0021|tyefj64j0015|tyefj64j0017|tyefj64j0019|tyefj64j0020|tyefj64j0021|tyefj65j0015|tyefj65j0017|tyefj65j0019|tyefj65j0020|tyefj65j0021|tyefj66j0015|tyefj66j0017|tyefj66j0019|tyefj66j0020|tyefj66j0021|tyefj67j0015|tyefj67j0017|tyefj67j0019|tyefj67j0020|tyefj67j0021|tyefj68j0015|tyefj68j0017|tyefj68j0019|tyefj68j0020|tyefj68j0021|tyefj69j0015|tyefj69j0017|tyefj69j0019|tyefj69j0020|tyefj69j0021|tyefj70j0015|tyefj70j0017|tyefj70j0019|tyefj70j0020|tyefj70j0021|tyefj71j0015|tyefj71j0017|tyefj71j0019|tyefj71j0020|tyefj71j0021|tyefj72j0015|tyefj72j0017|tyefj72j0019|tyefj72j0020|tyefj72j0021|tyefj72j0022|tyefj73j0015|tyefj73j0017|tyefj73j0019|tyefj73j0020|tyefj73j0021|tyefj73j0022|tyefj74j0015|tyefj74j0017|tyefj74j0019|tyefj74j0020|tyefj74j0021|tyefj74j0022|tyefj75j0015|tyefj75j0017|tyefj75j0019|tyefj75j0020|tyefj75j0021|tyefj75j0022|tyefj76j0015|tyefj76j0017|tyefj76j0019|tyefj76j0020|tyefj76j0021|tyefj76j0022|tyefj76j0119|tyefj77j0015|tyefj77j0017|tyefj77j0019|tyefj77j0020|tyefj77j0021|tyefj77j0022|tyefj77j01
19|tyefj78j0015|tyefj78j0017|tyefj78j0019|tyefj78j0020|tyefj78j0021|tyefj78j0022|tyefj78j0119|tyefj79j0015|tyefj79j0017|tyefj79j0019|tyefj79j0020|tyefj79j0021|tyefj79j0022|tyefj79j0119|tyefj80j0015|tyefj80j0017|tyefj80j0019|tyefj80j0020|tyefj80j0021|tyefj80j0022|tyefj80j0114|tyefj80j0119|tyefj81j0015|tyefj81j0017|tyefj81j0019|tyefj81j0020|tyefj81j0021|tyefj81j0022|tyefj81j0114|tyefj81j0119|tyefj82j0015|tyefj82j0017|tyefj82j0019|tyefj82j0020|tyefj82j0021|tyefj82j0022|tyefj82j0119|tyefj83j0015|tyefj83j0017|tyefj83j0019|tyefj83j0020|tyefj83j0021|tyefj83j0022|tyefj83j0119|tyefj84j0014|tyefj84j0015|tyefj84j0017|tyefj84j0019|tyefj84j0020|tyefj84j0021|tyefj84j0022|tyefj84j0119|tyefj85j0014|tyefj85j0015|tyefj85j0017|tyefj85j0019|tyefj85j0020|tyefj85j0021|tyefj85j0022|tyefj85j0119|tyefj86j0014|tyefj86j0015|tyefj86j0017|tyefj86j0019|tyefj86j0020|tyefj86j0021|tyefj86j0022|tyefj87j0014|tyefj87j0015|tyefj87j0017|tyefj87j0019|tyefj87j0020|tyefj87j0021|tyefj87j0022|tyefj88j0014|tyefj88j0015|tyefj88j0017|tyefj88j0019|tyefj88j0020|tyefj88j0021|tyefj88j0022|tyefj88j0100|tyefj88j0115|tyefj89j0003|tyefj89j0014|tyefj89j0015|tyefj89j0017|tyefj89j0019|tyefj89j0020|tyefj89j0021|tyefj89j0022|tyefj89j0100|tyefj89j0115|tyefj90j0014|tyefj90j0015|tyefj90j0016|tyefj90j0017|tyefj90j0018|tyefj90j0019|tyefj90j0020|tyefj90j0021|tyefj90j0022|tyefj90j0100|tyefj90j0111|tyefj90j0115|tyefj91j0014|tyefj91j0015|tyefj91j0016|tyefj91j0017|tyefj91j0018|tyefj91j0019|tyefj91j0020|tyefj91j0021|tyefj91j0022|tyefj91j0100|tyefj91j0111|tyefj91j0115|tyefj92j0014|tyefj92j0015|tyefj92j0016|tyefj92j0017|tyefj92j0018|tyefj92j0019|tyefj92j0020|tyefj92j0021|tyefj92j0022|tyefj92j0100|tyefj92j0105|tyefj92j0115|tyefj92j0121|tyefj93j0004|tyefj93j0014|tyefj93j0015|tyefj93j0017|tyefj93j0018|tyefj93j0019|tyefj93j0020|tyefj93j0021|tyefj93j0022|tyefj93j0100|tyefj93j0105|tyefj93j0115|tyefj93j0121|tyefj94j0002|tyefj94j0004|tyefj94j0008|tyefj94j0014|tyefj94j0015|tyefj94j0017|tyefj94j0019|tyefj94j0020|tyefj94j0021|tyefj94j0022|tyefj94j0084|tyefj94j0088|tyefj94j0100|tyefj94j0106|tyefj94j0116|tyefj94j0121|tyefj94j0123|tyefj95j0002|tyefj95j0004|tyefj95j0008|tyefj95j0014|tyefj95j0015|tyefj95j0017|tyefj95j0019|tyefj95j0020|tyefj95j0021|tyefj95j0022|tyefj95j0084|tyefj95j0088|tyefj95j0100|tyefj95j0101|tyefj95j0106|tyefj95j0112|tyefj95j0116|tyefj95j0121|tyefj95j0123|tyefj96j0014|tyefj96j0015|tyefj96j0017|tyefj96j0019|tyefj96j0020|tyefj96j0021|tyefj96j0022|tyefj96j0082|tyefj96j0084|tyefj96j0100|tyefj96j0101|tyefj96j0112|tyefj96j0117|tyefj96j0121|tyefj96j0124|tyefj97j0014|tyefj97j0015|tyefj97j0017|tyefj97j0019|tyefj97j0020|tyefj97j0021|tyefj97j0022|tyefj97j0081|tyefj97j0087|tyefj97j0098|tyefj97j0100|tyefj97j0107|tyefj97j0109|tyefj97j0113|tyefj97j0117|tyefj97j0118|tyefj97j0121|tyefj98j0003|tyefj98j0006|tyefj98j0014|tyefj98j0015|tyefj98j0017|tyefj98j0019|tyefj98j0020|tyefj98j0021|tyefj98j0022|tyefj98j0083|tyefj98j0085|tyefj98j0086|tyefj98j0100|tyefj98j0104|tyefj98j0118|tyefj98j0121|tyefj99j0003|tyefj99j0006|tyefj99j0007|tyefj99j0014|tyefj99j0015|tyefj99j0017|tyefj99j0019|tyefj99j0020|tyefj99j0021|tyefj99j0022|tyefj99j0023|tyefj99j0100|tyefj99j0108|tyefj99j0110|tyefj99j0121|tyefj99j0125|tyeff94j0002|tyeff94j0008|tyeff94j0010|tyeff94j0011|tyeff94j0035|tyeff95j0002|tyeff95j0006|tyeff95j0008|tyeff95j0010|tyeff95j0011|tyeff95j0035|tyeff96j0003|tyeff96j0006|tyeff96j0009|tyeff96j0010|tyeff97j0004|tyeff97j0009|tyeff97j0116|tyeff98j0007|tyeff99j0007|tyeff99j0125|uyyfj00j0484|uyyfj00j0485|uyyfj00j0486|uyyfj00j0487|uyyfj00j0488|uyyfj00j0489|uyyfj00j0490|uyyfj00j0491|uyyfj00j0492|uyy
fj00j0493|uyyfj00j0494|uyyfj00j0495|uyyfj00j0496|uyyfj00j0497|uyyfj00j0498|uyyfj00j0499|uyyfj00j0500|uyyfj00j0501|uyyfj00j0502|uyyfj00j0503|uyyfj00j0504|uyyfj00j0505|uyyfj00j0506|uyyfj00j0507|uyyfj00j0508|uyyfj00j0509|uyyfj00j0510|uyyfj00j0511|uyyfj00j0512|uyyfj00j0513|uyyfj00j0514|uyyfj00j0515|uyyfj00j0516|uyyfj00j0517|uyyfj00j0518|uyyfj00j0519|uyyfj00j0520|uyyfj00j0521|uyyfj00j0522|uyyfj00j0523|uyyfj00j0524|uyyfj00j0525|uyyfj00j0526|uyyfj00j0527|uyyfj00j0528|uyyfj00j0529|uyyfj00j0530|uyyfj00j0531|uyyfj00j0532|uyyfj00j0533|uyyfj00j0534|uyyfj00j0535|uyyfj00j0536|uyyfj00j0537|uyyfj00j0538|uyyfj00j0539|uyyfj00j0540|uyyfj00j0541|uyyfj00j0542|uyyfj00j0543|uyyfj00j0544|uyyfj00j0545|uyyfj00j0546|uyyfj00j0547|uyyfj00j0548|uyyfj00j0549|uyyfj00j0550|uyyfj00j0551|uyyfj00j0553|uyyfj00j0554|uyyfj00j0555|uyyfj00j0556|uyyfj00j0557|uyyfj00j0558|uyyfj00j0559|uyyfj00j0560|uyyfj00j0561|uyyfj00j0562|uyyfj00j0563|uyyfj00j0564|uyyfj00j0565|uyyfj00j0566|uyyfj00j0614|uyyfj00j0615|uyyfj00j0616|uyyfj00j0617|uyyfj00j0618|uyyfj00j0619|uyyfj00j0620|uyyfj00j0621|uyyfj00j0622|uyyfj00j0623|uyyfj00j0624|uyyfj00j0625|uyyfj00j0626|uyyfj00j0627|uyyfj00j0628|uyyfj00j0629|uyyfj00j0630|uyyfj00j0631|uyyfj00j0632|uyyfj00j0633|uyyfj00j0634|uyyfj00j0635|uyyfj00j0636|uyyfj00j0637|uyyfj00j0638|uyyfj00j0639|uyyfj00j0640|uyyfj00j0641|uyyfj00j0642|uyyfj00j0643|uyyfj00j0644|uyyfj00j0645|uyyfj00j0646|uyyfj00j0647|uyyfj00j0648|uyyfj00j0649|uyyfj00j0650|uyyfj00j0651|uyyfj00j0652|uyyfj00j0653|uyyfj00j0654|uyyfj00j0655|uyyfj00j0656|uyyfj00j0657|uyyfj00j0658|uyyfj00j0659|uyyfj00j0660|uyyfj00j0661|uyyfj00j0662|uyyfj00j0663|uyyfj00j0664|uyyfj00j0665|uyyfj00j0666|uyyfj00j0667|uyyfj00j0668|uyyfj00j0669|uyyfj00j0670|uyyfj00j0671|uyyfj00j0672|uyyfj00j0673|uyyfj00j0674|uyyfj00j0675|uyyfj00j0676|uyyfj00j0677|uyyfj00j0678|uyyfj00j0679|uyyfj00j0680|uyyfj00j0681|uyyfj00j0682|uyyfj00j0683|uyyfj00j0684|uyyfj00j0685|uyyfj00j0686|uyyfj00j0687|uyyfj00j0688|uyyfj00j0689|uyyfj00j0690|uyyfj00j0691|uyyfj00j0692|uyyfj00j0693|uyyfj00j0694|uyyfj00j0695|uyyfj00j0696|uyyfj00j0697|uyyfj00j0698|uyyfj00j0699|uyyfj00j0700|uyyfj00j0701|uyyfj00j0702|uyyfj00j0703|uyyfj00j0704|uyyfj00j0705|uyyfj00j0706|uyyfj00j0707|uyyfj00j0708|uyyfj00j0709|uyyfj00j0710|uyyfj00j0711|uyyfj00j0712|uyyfj00j0713|uyyfj00j0714|uyyfj00j0715|uyyfj00j0716|uyyfj00j0717|uyyfj00j0718|uyyfj00j0719|uyyfj00j0720|uyyfj00j0721|uyyfj00j0722|uyyfj00j0723|uyyfj00j0724|uyyfj00j0725|uyyfj00j0726|uyyfj00j0727|uyyfj00j0728|uyyfj00j0729|uyyfj00j0730|uyyfj00j0731|uyyfj00j0732|uyyfj00j0733|uyyfj00j0734|uyyfj00j0735|uyyfj00j0736|uyyfj00j0737|uyyfj00j0738|uyyfj00j0739|uyyfj00j0740|uyyfj00j0741|uyyfj00j0742|uyyfj00j0743|uyyfj00j0744|uyyfj00j0745|uyyfj00j0746|uyyfj00j0747|uyyfj00j0748|uyyfj00j0749|uyyfj00j0750|uyyfj00j0751|uyyfj00j0752|uyyfj00j0753|uyyfj00j0754|uyyfj00j0755|uyyfj00j0756|uyyfj00j0757|uyyfj00j0758|uyyfj00j0759|uyyfj00j0760|uyyfj00j0761|uyyfj00j0762|uyyfj00j0763|uyyfj00j0764|uyyfj00j0765|uyyfj00j0766|uyyfj00j0767|uyyfj00j0768|uyyfj00j0769|uyyfj00j0770|uyyfj00j0771|uyyfj00j0772|uyyfj00j0773|uyyfj00j0774|uyyfj00j0775|uyyfj00j0776|uyyfj00j0777|uyyfj00j0778|uyyfj00j0779|uyyfj00j0780|uyyfj00j0781|uyyfj00j0782|uyyff00j0011|uyyff00j0031|uyyff00j0032|uyyff00j0033|uyyff00j0034|uyyff99j0012|uyefj00j0071|uyefj00j0455|uyefj00j0456|uyefj00j0582|uyefj00j0583|uyefj00j0584|uyefj00j0585|uyefj00j0586|uyefj00j0590|uyeff00j0188|xyrly-f-jyy-y01|xyrly-f-jyy-y02|xyrly-f-jyy-y03|xyrly-f-jyy-y04|xyrly-f-jyy-y05|xyrly-f-jyy-y06|xyrly-f-jyy-y07|xyrly-f-jyy-y08|xyrly-f-jyy-y09|xyrly-f-jyy-y10|xyrly-f-jyy-y11|xyrly-f-jyy-y12|xyrly-f-jyy-
y13|xyrly-f-jyy-y14|xyrly-f-jyy-y15|xyrly-f-jyy-y16|xyrly-f-url-y01|xyrly-f-url-y02|yyefj97j0005|ybyfcy4000|ybyfcy4001|ayefj99j0035|by-b-y-bzu-l01|by-b-y-bzu-l02|by-b-e-079|by-b-e-080|by-b-e-082|by-b-e-083|byefj72j0002|byefj73j0002|byefj74j0002|byefj75j0002|byefj76j0002|byefj77j0002|byefj78j0002|byefj79j0002|byefj91j0007|byefj92j0007|byefj98j0003|byefj99j0003|byefj99j0005|byefj99j0006|byeff88j0002|byeff89j0002|byeff90j0002|byeff91j0002|byeff92j0002|byeff93j0002|byeff96j0003|byeff97j0003|byeff98j0003|byeff99j0003|fymfj98j0001|fymfj99j0001|fyyaj98k0297|fyyaj99k0297|fyyfj00j0109|fyyfj00j0110|fyyfj00j0122|fyyfj00j0123|fyyfj00j0201|fyyfj00j0202|fyyfj00j0207|fyyfj00j0208|fyyfj00j0227|fyyfj00j0228|fyyfj00j0229|fyyfj00j0230|fyyfj00j0231|fyyfj00j0232|fyyfj00j0233|fyyfj00j0234|fyyfj00j0235|fyyfj00j0236|fyyfj00j0237|fyyfj00j0238|fyyfj00j0239|fyyfj00j0240|fyyfj00j0241|fyyfj00j0242|fyyfj00j0243|fyyfj00j0244|fyyfj00j0245|fyyfj00j0246|fyyfj00j0247|fyyfj00j0248|fyyfj00j0249|fyyfj00j0250|fyyfj00j0251|fyyfj00j0252|fyyfj00j0253|fyyfj00j0254|fyyfj00j0255|fyyfj00j0256|fyyfj00j0257|fyyfj00j0258|fyyfj00j0259|fyyfj00j0260|fyyfj00j0261|fyyfj00j0262|fyyfj00j0263|fyyfj00j0264|fyyfj00j0265|fyyfj00j0266|fyyfj00j0267|fyyfj00j0268|fyyfj00j0290|fyyfj00j0291|fyyfj00j0292|fyyfj00j0293|fyyfj00j0294|fyyfj00j0295|fyyfj00j0296|fyyfj00j0297|fyyfj00j0298|fyyfj00j0299|fyyfj00j0300|fyyfj00j0301|fyyfj00j0302|fyyfj00j0303|fyyfj00j0304|fyyfj00j0305|fyyfj00j0306|fyyfj00j0307|fyyfj00j0308|fyyfj00j0309|fyyfj00j0310|fyyfj00j0311|fyyfj00j0312|fyyfj00j0313|fyyfj00j0314|fyyfj00j0315|fyyfj00j0316|fyyfj00j0317|fyyfj00j0318|fyyfj00j0319|fyyfj00j0320|fyyfj00j0321|fyyfj00j0322|fyyfj00j0323|fyyfj00j0324|fyyfj00j0325|fyyfj00j0326|fyyfj00j0327|fyyfj00j0328|fyyfj00j0329|fyyfj00j0330|fyyfj00j0331|fyyfj00j0332|fyyfj00j0333|fyyfj00j0334|fyyfj00j0335|fyyfj00j0340|fyyfj00j0341|fyyfj00j0342|fyyfj00j0343|fyyfj00j0344|fyyfj00j0345|fyyfj00j0346|fyyfj00j0347|fyyfj00j0348|fyyfj00j0349|fyyfj00j0367|fyyfj00j0368|fyyfj00j0369|fyyfj00j0370|fyyfj00j0371|fyyfj00j0372|fyyfj00j0373|fyyfj00j0374|fyyfj00j0375|fyyfj00j0376|fyyfj00j0377|fyyfj00j0378|fyyfj00j0379|fyyfj00j0380|fyyfj00j0381|fyyfj00j0382|fyyfj00j0383|fyyfj00j0384|fyyfj00j0385|fyyfj00j0386|fyyfj00j0387|fyyfj00j0388|fyyfj00j0415|fyyfj00j0416|fyyfj00j0417|fyyfj00j0418|fyyfj00j0419|fyyfj00j0420|fyyfj00j0421|fyyfj00j0422|fyyfj00j0423|fyyfj00j0424|fyyfj00j0425|fyyfj00j0426|fyyfj00j0427|fyyfj00j0428|fyyfj00j0429|fyyfj00j0430|fyyfj00j0431|fyyfj00j0432|fyyfj00j0433|fyyfj00j0434|fyyfj00j0435|fyyfj00j0436|fyyfj00j0437|fyyfj00j0438|fyyfj00j0439|fyyfj00j0440|fyyfj00j0441|fyyfj00j0446|fyyfj00j0447|fyyfj00j0448|fyyfj00j0449|fyyfj00j0451|fyyfj00j0452|fyyfj00j0453|fyyfj00j0454|fyyfj00j0455|fyyfj00j0456|fyyfj00j0457|fyyfj00j0459|fyyfj00j0460|fyyfj00j0461|fyyfj00j0462|fyyfj00j0463|fyyfj00j0464|fyyfj00j0465|fyyfj00j0466|fyyfj00j0467|fyyfj00j0468|fyyfj00j0469|fyyfj00j0470|fyyfj00j0471|fyyfj00j0474|fyyfj00j0475|fyyfj00j0476|fyyfj00j0477|fyyfj00j0478|fyyfj00j0479|fyyfj00j0480|fyyfj00j0481|fyyfj00j0482|fyyfj00j0483|fyyfj00j0484|fyyfj00j0485|fyyfj00j0486|fyyfj00j0487|fyyfj00j0488|fyyfj00j0489|fyyfj00j0490|fyyfj00j0491|fyyfj00j0492|fyyfj00j0493|fyyfj00j0494|fyyfj00j0495|fyyfj00j0496|fyyfj00j0497|fyyfj00j0498|fyyfj00j0499|fyyfj00j0500|fyyfj00j0501|fyyfj00j0502|fyyfj00j0503|fyyfj00j0504|fyyfj00j0505|fyyfj00j0506|fyyfj00j0507|fyyfj00j0508|fyyfj00j0509|fyyfj00j0510|fyyfj00j0511|fyyfj00j0512|fyyfj00j0513|fyyfj00j0514|fyyfj00j0515|fyyfj00j0516|fyyfj00j0517|fyyfj00j0518|fyyfj00j0521|fyyfj00j0522|fyyfj00j0523|fyyfj00j0524|fyyfj00j0526|fyyfj00j
0527|fyyfj00j0528|fyyfj00j0529|fyyfj00j0530|fyyfj00j0531|fyyfj00j0532|fyyfj00j0533|fyyfj00j0534|fyyfj00j0535|fyyfj00j0536|fyyfj00j0537|fyyfj00j0538|fyyfj00j0539|fyyfj00j0540|fyyfj00j0541|fyyfj00j0542|fyyfj00j0543|fyyfj00j0544|fyyfj00j0545|fyyfj00j0546|fyyfj00j0564|fyyfj00j0565|fyyfj00j0566|fyyfj00j0567|fyyfj00j0568|fyyfj00j0569|fyyfj00j0570|fyyfj00j0571|fyyfj00j0572|fyyfj00j0574|fyyfj00j0575|fyyfj00j0576|fyyfj00j0577|fyyfj00j0578|fyyfj00j0579|fyyfj00j0580|fyyfj01j0473|fyyfj02j0473|fyyfj36j0289|fyyfj37j0209|fyyfj37j0289|fyyfj38j0209|fyyfj38j0289|fyyfj39j0209|fyyfj39j0289|fyyfj40j0209|fyyfj40j0289|fyyfj41j0209|fyyfj41j0289|fyyfj42j0209|fyyfj42j0289|fyyfj43j0209|fyyfj43j0289|fyyfj44j0209|fyyfj44j0289|fyyfj45j0104|fyyfj45j0209|fyyfj45j0289|fyyfj46j0104|fyyfj46j0209|fyyfj46j0289|fyyfj47j0104|fyyfj47j0209|fyyfj47j0289|fyyfj48j0104|fyyfj48j0209|fyyfj48j0289|fyyfj49j0104|fyyfj49j0209|fyyfj49j0289|fyyfj50j0104|fyyfj50j0209|fyyfj50j0289|fyyfj50j0500|fyyfj51j0104|fyyfj51j0209|fyyfj51j0289|fyyfj51j0500|fyyfj52j0104|fyyfj52j0209|fyyfj52j0289|fyyfj52j0500|fyyfj53j0104|fyyfj53j0209|fyyfj53j0289|fyyfj53j0500|fyyfj54j0104|fyyfj54j0209|fyyfj54j0289|fyyfj54j0500|fyyfj55j0104|fyyfj55j0209|fyyfj55j0289|fyyfj55j0500|fyyfj56j0104|fyyfj56j0209|fyyfj56j0289|fyyfj56j0500|fyyfj57j0104|fyyfj57j0209|fyyfj57j0289|fyyfj57j0500|fyyfj58j0104|fyyfj58j0209|fyyfj58j0289|fyyfj58j0500|fyyfj59j0104|fyyfj59j0209|fyyfj59j0289|fyyfj59j0500|fyyfj60j0104|fyyfj60j0209|fyyfj60j0289|fyyfj60j0500|fyyfj61j0104|fyyfj61j0209|fyyfj61j0289|fyyfj61j0500|fyyfj62j0104|fyyfj62j0209|fyyfj62j0289|fyyfj62j0500|fyyfj63j0104|fyyfj63j0209|fyyfj63j0289|fyyfj63j0500|fyyfj64j0104|fyyfj64j0107|fyyfj64j0209|fyyfj64j0289|fyyfj64j0500|fyyfj64j0573|fyyfj65j0104|fyyfj65j0107|fyyfj65j0209|fyyfj65j0289|fyyfj65j0500|fyyfj65j0573|fyyfj66j0104|fyyfj66j0107|fyyfj66j0209|fyyfj66j0289|fyyfj66j0500|fyyfj66j0573|fyyfj67j0104|fyyfj67j0107|fyyfj67j0209|fyyfj67j0289|fyyfj67j0500|fyyfj67j0573|fyyfj68j0104|fyyfj68j0107|fyyfj68j0209|fyyfj68j0289|fyyfj68j0500|fyyfj68j0573|fyyfj69j0104|fyyfj69j0107|fyyfj69j0209|fyyfj69j0289|fyyfj69j0500|fyyfj69j0573|fyyfj70j0104|fyyfj70j0107|fyyfj70j0209|fyyfj70j0289|fyyfj70j0472|fyyfj70j0500|fyyfj70j0573|fyyfj71j0104|fyyfj71j0107|fyyfj71j0209|fyyfj71j0289|fyyfj71j0472|fyyfj71j0500|fyyfj71j0573|fyyfj72j0104|fyyfj72j0107|fyyfj72j0209|fyyfj72j0289|fyyfj72j0472|fyyfj72j0500|fyyfj72j0573|fyyfj73j0104|fyyfj73j0107|fyyfj73j0209|fyyfj73j0289|fyyfj73j0472|fyyfj73j0500|fyyfj73j0573|fyyfj74j0104|fyyfj74j0107|fyyfj74j0209|fyyfj74j0289|fyyfj74j0472|fyyfj74j0500|fyyfj74j0573|fyyfj75j0104|fyyfj75j0107|fyyfj75j0108|fyyfj75j0209|fyyfj75j0289|fyyfj75j0472|fyyfj75j0500|fyyfj75j0573|fyyfj76j0104|fyyfj76j0107|fyyfj76j0108|fyyfj76j0209|fyyfj76j0289|fyyfj76j0472|fyyfj76j0500|fyyfj76j0573|fyyfj77j0104|fyyfj77j0107|fyyfj77j0108|fyyfj77j0209|fyyfj77j0289|fyyfj77j0472|fyyfj77j0500|fyyfj77j0573|fyyfj78j0104|fyyfj78j0107|fyyfj78j0108|fyyfj78j0209|fyyfj78j0289|fyyfj78j0472|fyyfj78j0500|fyyfj78j0573|fyyfj79j0104|fyyfj79j0107|fyyfj79j0108|fyyfj79j0209|fyyfj79j0289|fyyfj79j0339|fyyfj79j0472|fyyfj79j0500|fyyfj79j0573|fyyfj80j0104|fyyfj80j0107|fyyfj80j0108|fyyfj80j0209|fyyfj80j0289|fyyfj80j0339|fyyfj80j0352|fyyfj80j0472|fyyfj80j0500|fyyfj80j0573|fyyfj81j0104|fyyfj81j0107|fyyfj81j0108|fyyfj81j0209|fyyfj81j0289|fyyfj81j0339|fyyfj81j0352|fyyfj81j0472|fyyfj81j0500|fyyfj81j0573|fyyfj82j0104|fyyfj82j0107|fyyfj82j0108|fyyfj82j0209|fyyfj82j0289|fyyfj82j0339|fyyfj82j0352|fyyfj82j0472|fyyfj82j0500|fyyfj82j0573|fyyfj83j0104|fyyfj83j0107|fyyfj83j0108|fyyfj83j0209|fyyfj83j0289|fyyfj83j0339|f
yyfj83j0352|fyyfj83j0472|fyyfj83j0500|fyyfj83j0573|fyyfj84j0104|fyyfj84j0107|fyyfj84j0108|fyyfj84j0209|fyyfj84j0289|fyyfj84j0339|fyyfj84j0352|fyyfj84j0472|fyyfj84j0500|fyyfj84j0573|fyyfj85j0104|fyyfj85j0107|fyyfj85j0108|fyyfj85j0209|fyyfj85j0289|fyyfj85j0301|fyyfj85j0339|fyyfj85j0352|fyyfj85j0472|fyyfj85j0500|fyyfj85j0573|fyyfj86j0104|fyyfj86j0107|fyyfj86j0108|fyyfj86j0209|fyyfj86j0289|fyyfj86j0301|fyyfj86j0339|fyyfj86j0352|fyyfj86j0472|fyyfj86j0500|fyyfj86j0573|fyyfj87j0067|fyyfj87j0104|fyyfj87j0107|fyyfj87j0108|fyyfj87j0209|fyyfj87j0289|fyyfj87j0301|fyyfj87j0339|fyyfj87j0352|fyyfj87j0472|fyyfj87j0500|fyyfj87j0573|fyyfj88j0067|fyyfj88j0104|fyyfj88j0107|fyyfj88j0108|fyyfj88j0209|fyyfj88j0289|fyyfj88j0301|fyyfj88j0339|fyyfj88j0352|fyyfj88j0472|fyyfj88j0500|fyyfj88j0573|fyyfj89j0067|fyyfj89j0104|fyyfj89j0107|fyyfj89j0108|fyyfj89j0209|fyyfj89j0289|fyyfj89j0301|fyyfj89j0339|fyyfj89j0352|fyyfj89j0358|fyyfj89j0472|fyyfj89j0500|fyyfj89j0573|fyyfj90j0067|fyyfj90j0104|fyyfj90j0107|fyyfj90j0108|fyyfj90j0209|fyyfj90j0289|fyyfj90j0301|fyyfj90j0321|fyyfj90j0339|fyyfj90j0352|fyyfj90j0358|fyyfj90j0452|fyyfj90j0472|fyyfj90j0500|fyyfj90j0573|fyyfj91j0067|fyyfj91j0104|fyyfj91j0107|fyyfj91j0108|fyyfj91j0209|fyyfj91j0289|fyyfj91j0301|fyyfj91j0321|fyyfj91j0339|fyyfj91j0352|fyyfj91j0358|fyyfj91j0452|fyyfj91j0472|fyyfj91j0500|fyyfj91j0573|fyyfj92j0067|fyyfj92j0104|fyyfj92j0107|fyyfj92j0108|fyyfj92j0209|fyyfj92j0289|fyyfj92j0301|fyyfj92j0321|fyyfj92j0339|fyyfj92j0352|fyyfj92j0358|fyyfj92j0452|fyyfj92j0472|fyyfj92j0500|fyyfj92j0573|fyyfj93j0067|fyyfj93j0099|fyyfj93j0104|fyyfj93j0107|fyyfj93j0108|fyyfj93j0209|fyyfj93j0289|fyyfj93j0301|fyyfj93j0321|fyyfj93j0352|fyyfj93j0358|fyyfj93j0452|fyyfj93j0472|fyyfj93j0500|fyyfj93j0573|fyyfj94j0067|fyyfj94j0099|fyyfj94j0104|fyyfj94j0107|fyyfj94j0108|fyyfj94j0209|fyyfj94j0211|fyyfj94j0289|fyyfj94j0301|fyyfj94j0321|fyyfj94j0352|fyyfj94j0358|fyyfj94j0359|fyyfj94j0452|fyyfj94j0472|fyyfj94j0500|fyyfj94j0573|fyyfj95j0067|fyyfj95j0099|fyyfj95j0104|fyyfj95j0107|fyyfj95j0108|fyyfj95j0209|fyyfj95j0211|fyyfj95j0289|fyyfj95j0298|fyyfj95j0301|fyyfj95j0321|fyyfj95j0339|fyyfj95j0352|fyyfj95j0358|fyyfj95j0359|fyyfj95j0414|fyyfj95j0452|fyyfj95j0472|fyyfj95j0500|fyyfj95j0573|fyyfj96j0067|fyyfj96j0099|fyyfj96j0104|fyyfj96j0107|fyyfj96j0108|fyyfj96j0209|fyyfj96j0211|fyyfj96j0289|fyyfj96j0298|fyyfj96j0301|fyyfj96j0321|fyyfj96j0339|fyyfj96j0352|fyyfj96j0358|fyyfj96j0359|fyyfj96j0414|fyyfj96j0452|fyyfj96j0472|fyyfj96j0500|fyyfj96j0573|fyyfj97j0067|fyyfj97j0099|fyyfj97j0100|fyyfj97j0104|fyyfj97j0107|fyyfj97j0108|fyyfj97j0209|fyyfj97j0211|fyyfj97j0289|fyyfj97j0298|fyyfj97j0301|fyyfj97j0321|fyyfj97j0339|fyyfj97j0352|fyyfj97j0358|fyyfj97j0359|fyyfj97j0414|fyyfj97j0445|fyyfj97j0452|fyyfj97j0472|fyyfj97j0500|fyyfj97j0573|fyyfj98j0067|fyyfj98j0099|fyyfj98j0100|fyyfj98j0104|fyyfj98j0107|fyyfj98j0108|fyyfj98j0178|fyyfj98j0209|fyyfj98j0211|fyyfj98j0289|fyyfj98j0298|fyyfj98j0301|fyyfj98j0303|fyyfj98j0321|fyyfj98j0339|fyyfj98j0352|fyyfj98j0358|fyyfj98j0359|fyyfj98j0413|fyyfj98j0414|fyyfj98j0445|fyyfj98j0452|fyyfj98j0472|fyyfj98j0500|fyyfj98j0573|fyyfj99j0067|fyyfj99j0099|fyyfj99j0100|fyyfj99j0104|fyyfj99j0107|fyyfj99j0108|fyyfj99j0131|fyyfj99j0209|fyyfj99j0211|fyyfj99j0285|fyyfj99j0289|fyyfj99j0298|fyyfj99j0301|fyyfj99j0303|fyyfj99j0321|fyyfj99j0339|fyyfj99j0352|fyyfj99j0358|fyyfj99j0359|fyyfj99j0413|fyyfj99j0414|fyyfj99j0445|fyyfj99j0452|fyyfj99j0472|fyyfj99j0500|fyyfj99j0573|fyyfm01j0064|fyyfm01j0070|fyyfm01j0071|fyyfm01j0088|fyyfm01j0091|fyyfm01j0108|fyyfm01j0111|fyyfm01j0112|fyyfm01j0114|fyyfm01j0115|fyyfm01
j0133|fyyfm01j0140|fyyfm01j0141|fyyfm01j0142|fyyfm01j0143|fyyfm01j0148|fyyfm01j0149|fyyfm01j0152|fyyfm01j0153|fyyfm01j0155|fyyfm01j0159|fyyfm01j0160|fyyfm01j0163|fyyfm01j0165|fyyfm01j0168|fyyfm01j0169|fyyfm01j0221|fyyfm01j0223|fyyfm01j0268|fyyfm01j0271|fyyfm01j0285|fyyfm01j0299|fyyfm01j0320|fyyfm01j0321|fyyfm01j0360|fyyfm01j0369|fyyfm01j0400|fyyfm01j0401|fyyfm01j0411|fyyfm01j0572|fyyfm01j0765|fyyfm02j0064|fyyfm02j0069|fyyfm02j0070|fyyfm02j0071|fyyfm02j0088|fyyfm02j0091|fyyfm02j0108|fyyfm02j0111|fyyfm02j0112|fyyfm02j0114|fyyfm02j0115|fyyfm02j0133|fyyfm02j0140|fyyfm02j0141|fyyfm02j0142|fyyfm02j0143|fyyfm02j0148|fyyfm02j0149|fyyfm02j0152|fyyfm02j0153|fyyfm02j0155|fyyfm02j0159|fyyfm02j0160|fyyfm02j0163|fyyfm02j0165|fyyfm02j0168|fyyfm02j0169|fyyfm02j0221|fyyfm02j0223|fyyfm02j0268|fyyfm02j0271|fyyfm02j0285|fyyfm02j0299|fyyfm02j0320|fyyfm02j0321|fyyfm02j0360|fyyfm02j0369|fyyfm02j0400|fyyfm02j0572|fyyfm02j0765|fyyfm03j0064|fyyfm03j0070|fyyfm03j0091|fyyfm03j0108|fyyfm03j0111|fyyfm03j0115|fyyfm03j0160|fyyfm03j0165|fyyfm03j0299|fyyfm03j0400|fyyfm03j0572|fyyfm04j0111|fyyfm51j0064|fyyfm51j0369|fyyfm52j0064|fyyfm52j0369|fyyfr88j0003|fyyfr89j0003|fyyff98j0071|fyyff98j0303|fyyff99j0029|fyyff99j0303|fyefj00j0112|fyefj00j0545|fyefj00j0546|fyefj00j0633|fyefj00j0634|fyefj00j0635|fyefj00j0636|fyefj00j0637|fyefj00j0649|fyefj00j0651|fyefj00j0652|fyefj00j0656|fyefj00j0657|fyefj00j0658|fyefj00j0659|fyefj00j0660|fyefj00j0685|fyefj00j0686|fyefj00j0688|fyefj00j0701|fyefj00j0702|fyefj00j0703|fyefj00j0715|fyefj00j0720|fyefj00j0721|fyefj00j0722|fyefj00j0724|fyefj00j0725|fyefj00j0726|fyefj00j0731|fyefj00j0751|fyefj00j0752|fyefj00j0756|fyefj00j0757|fyefj00j0758|fyefj00j0759|fyefj00j0761|fyefj00j0762|fyefj00j0763|fyefj00j0764|fyefj00j0768|fyefj00j0769|fyefj00j0785|fyefj00j0786|fyefj00j0789|fyefj00j0790|fyefj00j0793|fyefj00j0794|fyefj00j0803|fyefj00j0811|fyefj00j0821|fyefj00j0822|fyefj00j0823|fyefj00j0824|fyefj00j0825|fyefj00j0826|fyefj00j0827|fyefj00j0828|fyefj00j0829|fyefj00j0831|fyefj00j0832|fyefj00j0833|fyefj00j0838|fyefj00j0839|fyefj00j0840|fyefj00j0854|fyefj00j0855|fyefj00j0856|fyefj00j0859|fyefj00j0860|fyefj00j0861|fyefj00j0869|fyefj00j0870|fyefj00j0879|fyefj00j0887|fyefj00j0888|fyefj00j0889|fyefj00j0900|fyefj00j0901|fyefj00j0903|fyefj00j0904|fyefj00j0905|fyefj00j0959|fyefj00j0960|fyefj00j0961|fyefj00j1004|fyefj00j1005|fyefj00j1012|fyefj00j1013|fyefj00j1014|fyefj00j1015|fyefj00j1016|fyefj00j1017|fyefj00j1018|fyefj00j1019|fyefj00j1020|fyefj00j1021|fyefj00j1218|fyefj00j1219|fyefj00j1220|fyefj00j1221|fyefj00j1222|fyefj00j1811|fyefj00j1854|fyefj00j1855|fyefj00j1856|fyefj01j0707|fyefj02j0707|fyefj03j0707|fyefj66j0001|fyefj67j0001|fyefj68j0001|fyefj68j1064|fyefj69j0001|fyefj69j1064|fyefj70j0001|fyefj70j0859|fyefj70j1064|fyefj71j0001|fyefj71j1064|fyefj72j0001|fyefj72j1064|fyefj73j0001|fyefj73j1064|fyefj74j0001|fyefj74j1064|fyefj75j0001|fyefj75j1064|fyefj75j1092|fyefj76j0001|fyefj76j1064|fyefj76j1092|fyefj77j0001|fyefj77j1064|fyefj77j1092|fyefj78j0001|fyefj78j1064|fyefj78j1092|fyefj79j0001|fyefj79j1064|fyefj79j1092|fyefj80j0001|fyefj80j0859|fyefj80j1064|fyefj80j1077|fyefj80j1092|fyefj81j0001|fyefj81j1064|fyefj81j1077|fyefj81j1092|fyefj82j0001|fyefj82j1064|fyefj82j1092|fyefj83j0001|fyefj83j1064|fyefj83j1092|fyefj84j0001|fyefj84j1064|fyefj84j1092|fyefj85j0001|fyefj85j0356|fyefj85j1064|fyefj85j1092|fyefj86j0001|fyefj86j0356|fyefj86j1064|fyefj87j0001|fyefj87j0356|fyefj87j1064|fyefj88j0001|fyefj88j0356|fyefj88j1064|fyefj89j0001|fyefj89j0356|fyefj89j1064|fyefj89j1067|fyefj90j0001|fyefj90j0758|fyefj90j1021|fyefj90j1064|fyefj90j1067|
fyefj91j0001|fyefj91j0758|fyefj91j0791|fyefj91j1021|fyefj91j1064|fyefj91j1067|fyefj91j1077|fyefj92j0001|fyefj92j0359|fyefj92j0678|fyefj92j0758|fyefj92j0791|fyefj92j0867|fyefj92j1021|fyefj92j1064|fyefj92j1077|fyefj93j0001|fyefj93j0359|fyefj93j0678|fyefj93j0758|fyefj93j0791|fyefj93j0867|fyefj93j1010|fyefj93j1021|fyefj93j1049|fyefj93j1064|fyefj93j1077|fyefj94j0001|fyefj94j0678|fyefj94j0758|fyefj94j0791|fyefj94j0867|fyefj94j1010|fyefj94j1021|fyefj94j1049|fyefj94j1064|fyefj94j1070|fyefj94j1077|fyefj94j1085|fyefj95j0001|fyefj95j0678|fyefj95j0758|fyefj95j0791|fyefj95j0867|fyefj95j0965|fyefj95j0966|fyefj95j1010|fyefj95j1011|fyefj95j1021|fyefj95j1055|fyefj95j1064|fyefj95j1069|fyefj95j1077|fyefj95j1085|fyefj95j1089|fyefj96j0001|fyefj96j0106|fyefj96j0671|fyefj96j0678|fyefj96j0758|fyefj96j0791|fyefj96j0814|fyefj96j0836|fyefj96j0867|fyefj96j0931|fyefj96j0965|fyefj96j0966|fyefj96j0976|fyefj96j1010|fyefj96j1021|fyefj96j1051|fyefj96j1055|fyefj96j1064|fyefj96j1068|fyefj96j1070|fyefj96j1077|fyefj96j1079|fyefj96j1081|fyefj96j1086|fyefj96j1088|fyefj96j1091|fyefj96j1093|fyefj96j1094|fyefj97j0001|fyefj97j0106|fyefj97j0584|fyefj97j0586|fyefj97j0671|fyefj97j0678|fyefj97j0758|fyefj97j0791|fyefj97j0814|fyefj97j0825|fyefj97j0836|fyefj97j0863|fyefj97j0865|fyefj97j0867|fyefj97j0914|fyefj97j0931|fyefj97j0952|fyefj97j0965|fyefj97j0966|fyefj97j0969|fyefj97j0971|fyefj97j0972|fyefj97j0976|fyefj97j0985|fyefj97j1010|fyefj97j1021|fyefj97j1051|fyefj97j1052|fyefj97j1055|fyefj97j1058|fyefj97j1059|fyefj97j1064|fyefj97j1068|fyefj97j1077|fyefj97j1079|fyefj97j1081|fyefj97j1086|fyefj97j1088|fyefj97j1095|fyefj98j0001|fyefj98j0243|fyefj98j0326|fyefj98j0329|fyefj98j0343|fyefj98j0344|fyefj98j0380|fyefj98j0472|fyefj98j0584|fyefj98j0586|fyefj98j0604|fyefj98j0671|fyefj98j0673|fyefj98j0676|fyefj98j0677|fyefj98j0678|fyefj98j0694|fyefj98j0758|fyefj98j0814|fyefj98j0825|fyefj98j0836|fyefj98j0863|fyefj98j0865|fyefj98j0867|fyefj98j0896|fyefj98j0898|fyefj98j0901|fyefj98j0906|fyefj98j0910|fyefj98j0913|fyefj98j0914|fyefj98j0922|fyefj98j0931|fyefj98j0934|fyefj98j0936|fyefj98j0951|fyefj98j0952|fyefj98j0963|fyefj98j0965|fyefj98j0966|fyefj98j0969|fyefj98j0971|fyefj98j0972|fyefj98j0974|fyefj98j0975|fyefj98j0976|fyefj98j0977|fyefj98j0978|fyefj98j0985|fyefj98j0992|fyefj98j1008|fyefj98j1009|fyefj98j1010|fyefj98j1011|fyefj98j1012|fyefj98j1019|fyefj98j1021|fyefj98j1028|fyefj98j1034|fyefj98j1039|fyefj98j1046|fyefj98j1047|fyefj98j1048|fyefj98j1054|fyefj98j1055|fyefj98j1064|fyefj98j1068|fyefj98j1077|fyefj98j1079|fyefj98j1080|fyefj98j1081|fyefj98j1082|fyefj98j1084|fyefj98j1087|fyefj98j1088|fyefj98j1090|fyefj99j0010|fyefj99j0188|fyefj99j0243|fyefj99j0268|fyefj99j0280|fyefj99j0301|fyefj99j0329|fyefj99j0343|fyefj99j0344|fyefj99j0380|fyefj99j0552|fyefj99j0573|fyefj99j0584|fyefj99j0586|fyefj99j0604|fyefj99j0671|fyefj99j0673|fyefj99j0676|fyefj99j0677|fyefj99j0678|fyefj99j0694|fyefj99j0722|fyefj99j0757|fyefj99j0758|fyefj99j0771|fyefj99j0772|fyefj99j0804|fyefj99j0806|fyefj99j0809|fyefj99j0814|fyefj99j0825|fyefj99j0836|fyefj99j0862|fyefj99j0863|fyefj99j0865|fyefj99j0866|fyefj99j0867|fyefj99j0875|fyefj99j0896|fyefj99j0898|fyefj99j0901|fyefj99j0906|fyefj99j0907|fyefj99j0908|fyefj99j0910|fyefj99j0912|fyefj99j0913|fyefj99j0914|fyefj99j0921|fyefj99j0922|fyefj99j0923|fyefj99j0931|fyefj99j0934|fyefj99j0936|fyefj99j0937|fyefj99j0949|fyefj99j0951|fyefj99j0952|fyefj99j0962|fyefj99j0963|fyefj99j0965|fyefj99j0966|fyefj99j0969|fyefj99j0971|fyefj99j0972|fyefj99j0974|fyefj99j0975|fyefj99j0976|fyefj99j0977|fyefj99j0978|fyefj99j0982|fyefj99j0985|fyefj99j0986|fyefj99j0988|fyefj99j0991|fyefj9
9j0992|fyefj99j0995|fyefj99j0997|fyefj99j0999|fyefj99j1003|fyefj99j1006|fyefj99j1008|fyefj99j1009|fyefj99j1010|fyefj99j1011|fyefj99j1016|fyefj99j1019|fyefj99j1020|fyefj99j1021|fyefj99j1024|fyefj99j1026|fyefj99j1028|fyefj99j1031|fyefj99j1033|fyefj99j1034|fyefj99j1036|fyefj99j1039|fyefj99j1042|fyefj99j1045|fyefj99j1046|fyefj99j1048|fyefj99j1053|fyefj99j1054|fyefj99j1055|fyefj99j1061|fyefj99j1062|fyefj99j1063|fyefj99j1064|fyefj99j1068|fyefj99j1072|fyefj99j1076|fyefj99j1077|fyefj99j1079|fyefj99j1080|fyefj99j1081|fyefj99j1083|fyefj99j1084|fyefj99j1087|fyefj99j1088|fyefm00j0113|fyefm01j0057|fyefm01j0088|fyefm01j0091|fyefm01j0101|fyefm01j0104|fyefm01j0107|fyefm01j0112|fyefm01j0379|fyefm02j0057|fyefm02j0101|fyefm02j0104|fyefm02j0107|fyefm02j0112|fyefm02j0379|fyefm98j0066|fyefm99j0066|fyefm99j0090|fyefm99j0093|fyefm99j0110|fyefm99j0165|fyefm99j0208|fyefm99j0209|fyefm99j0295|fyefm99j0401|fyefm99j0402|fyefm99j0907|fyefm99j1054|fyefn98j0015|fyefn98j0024|fyefn98j0030|fyefn99j0015|fyefn99j0024|fyefn99j0030|fyefr94j0559|fyefr95j0559|fyefr96j0559|fyefr97j0559|fyefr98j0559|fyefr99j0012|fyefr99j0559|fyefb01305|fyeff00j0170|fyeff00j0224|fyeff00j0227|fyeff00j0228|fyeff00j0229|fyeff00j0280|fyeff00j0281|fyeff00j0282|fyeff00j0283|fyeff00j0288|fyeff00j0289|fyeff00j0331|fyeff00j0332|fyeff00j0333|fyeff00j0334|fyeff00j0335|fyeff00j0336|fyeff00j0337|fyeff00j0338|fyeff00j0346|fyeff00j0347|fyeff00j0348|fyeff00j0349|fyeff00j0350|fyeff00j0351|fyeff00j0357|fyeff00j0358|fyeff00j0371|fyeff00j0372|fyeff00j0396|fyeff00j0397|fyeff00j0424|fyeff00j0425|fyeff01j0416|fyeff02j0416|fyeff78j0418|fyeff79j0418|fyeff79j1051|fyeff80j1051|fyeff81j1051|fyeff82j1051|fyeff83j1051|fyeff84j1051|fyeff85j1051|fyeff86j1051|fyeff87j1051|fyeff88j0422|fyeff89j0422|fyeff90j0422|fyeff90j0434|fyeff90j0440|fyeff91j0422|fyeff91j0434|fyeff91j0440|fyeff92j0440|fyeff93j0440|fyeff93j1045|fyeff93j1067|fyeff94j0392|fyeff94j0440|fyeff94j0443|fyeff94j1045|fyeff94j1067|fyeff95j0219|fyeff95j0392|fyeff95j0439|fyeff95j0440|fyeff95j0443|fyeff96j0053|fyeff96j0219|fyeff96j0392|fyeff96j0429|fyeff96j0434|fyeff96j0950|fyeff96j1019|fyeff96j1028|fyeff97j0053|fyeff97j0178|fyeff97j0191|fyeff97j0219|fyeff97j0221|fyeff97j0258|fyeff97j0324|fyeff97j0355|fyeff97j0370|fyeff97j0377|fyeff97j0392|fyeff97j0429|fyeff97j0434|fyeff97j0950|fyeff97j1019|fyeff98j0053|fyeff98j0065|fyeff98j0101|fyeff98j0144|fyeff98j0156|fyeff98j0178|fyeff98j0191|fyeff98j0193|fyeff98j0196|fyeff98j0197|fyeff98j0209|fyeff98j0210|fyeff98j0211|fyeff98j0214|fyeff98j0215|fyeff98j0218|fyeff98j0219|fyeff98j0221|fyeff98j0258|fyeff98j0260|fyeff98j0279|fyeff98j0284|fyeff98j0295|fyeff98j0296|fyeff98j0298|fyeff98j0324|fyeff98j0355|fyeff98j0370|fyeff98j0376|fyeff98j0379|fyeff98j0381|fyeff98j0392|fyeff98j0401|fyeff98j0404|fyeff98j0405|fyeff98j0407|fyeff98j0411|fyeff98j0418|fyeff98j0421|fyeff98j0423|fyeff98j0433|fyeff98j0436|fyeff98j0673|fyeff98j0896|fyeff98j0950|fyeff98j0985|fyeff98j1012|fyeff99j0053|fyeff99j0065|fyeff99j0152|fyeff99j0156|fyeff99j0159|fyeff99j0178|fyeff99j0191|fyeff99j0193|fyeff99j0196|fyeff99j0197|fyeff99j0209|fyeff99j0210|fyeff99j0211|fyeff99j0214|fyeff99j0215|fyeff99j0218|fyeff99j0219|fyeff99j0220|fyeff99j0221|fyeff99j0260|fyeff99j0279|fyeff99j0284|fyeff99j0291|fyeff99j0295|fyeff99j0296|fyeff99j0297|fyeff99j0298|fyeff99j0324|fyeff99j0339|fyeff99j0355|fyeff99j0370|fyeff99j0376|fyeff99j0379|fyeff99j0381|fyeff99j0392|fyeff99j0401|fyeff99j0404|fyeff99j0405|fyeff99j0407|fyeff99j0410|fyeff99j0411|fyeff99j0413|fyeff99j0414|fyeff99j0415|fyeff99j0418|fyeff99j0421|fyeff99j0423|fyeff99j0436|fyeff99j0673|fyeff99j0896|f
yeff99j0950|fyeff99j0962|fyeff99j0985|fyeff99j1010|fyeff99j1012|fyeff99j1028|fyeff99j1090|fyeff99j1370|fayfm01j0148|fayfm01j0149|fayfm01j0155|fayfm02j0148|fayfm02j0149|fayfm02j0155|faefj00j0594|faefj00j0595|faefj00j0596|faefj00j0597|faefj01j0707|faefj02j0707|faefj03j0707|faefj90j1023|faefj91j1023|faefj92j1023|faefj94j1056|faefj95j1023|faefj95j1056|faefj96j1056|faefj98j1038|faefj99j1078|fdeff99j9001|fdeff99j9002|gyefj99j0005", + // A long case insensitive alternation. + "(?i:(zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb))", + "(?i:(AAAAAAAAAAAAAAAAAAAAAAAA|BBBBBBBBBBBBBBBBBBBBBBBB|cccccccccccccccccccccccC|ſſſſſſſſſſſſſſſſſſſſſſſſS|SSSSSSSSSSSSSSSSSSSSSSSSſ))", + // A short case insensitive alternation where each entry ends with ".*". + "(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*))", + // A long case insensitive alternation where each entry ends with ".*". + "(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*|qbvKMimS.*|IecrXtPa.*|seTckYqt.*|NxnyHkgB.*|fIDlOgKb.*|UhlWIygH.*|OtNoJxHG.*|cUTkFVIV.*|mTgFIHjr.*|jQkoIDtE.*|PPMKxRXl.*|AwMfwVkQ.*|CQyMrTQJ.*|BzrqxVSi.*|nTpcWuhF.*|PertdywG.*|ZZDgCtXN.*|WWdDPyyE.*|uVtNQsKk.*|BdeCHvPZ.*|wshRnFlH.*|aOUIitIp.*|RxZeCdXT.*|CFZMslCj.*|AVBZRDxl.*|IzIGCnhw.*|ythYuWiz.*|oztXVXhl.*|VbLkwqQx.*|qvaUgyVC.*|VawUjPWC.*|ecloYJuj.*|boCLTdSU.*|uPrKeAZx.*|hrMWLWBq.*|JOnUNHRM.*|rYnujkPq.*|dDEdZhIj.*|DRrfvugG.*|yEGfDxVV.*|YMYdJWuP.*|PHUQZNWM.*|AmKNrLis.*|zTxndVfn.*|FPsHoJnc.*|EIulZTua.*|KlAPhdzg.*|ScHJJCLt.*|NtTfMzME.*|eMCwuFdo.*|SEpJVJbR.*|cdhXZeCx.*|sAVtBwRh.*|kVFEVcMI.*|jzJrxraA.*|tGLHTell.*|NNWoeSaw.*|DcOKSetX.*|UXZAJyka.*|THpMphDP.*|rizheevl.*|kDCBRidd.*|pCZZRqyu.*|pSygkitl.*|SwZGkAaW.*|wILOrfNX.*|QkwVOerj.*|kHOMxPDr.*|EwOVycJv.*|AJvtzQFS.*|yEOjKYYB.*|LizIINLL.*|JBRSsfcG.*|YPiUqqNl.*|IsdEbvee.*|MjEpGcBm.*|OxXZVgEQ.*|xClXGuxa.*|UzRCGFEb.*|buJbvfvA.*|IPZQxRet.*|oFYShsMc.*|oBHffuHO.*|bzzKrcBR.*|KAjzrGCl.*|IPUsAVls.*|OGMUMbIU.*|gyDccHuR.*|bjlalnDd.*|ZLWjeMna.*|fdsuIlxQ.*|dVXtiomV.*|XxedTjNg.*|XWMHlNoA.*|nnyqArQX.*|opfkWGhb.*|wYtnhdYb.*))", + // A long case insensitive alternation where each entry starts with ".*". 
+ "(?i:(.*zQPbMkNO|.*NNSPdvMi|.*iWuuSoAl|.*qbvKMimS|.*IecrXtPa|.*seTckYqt|.*NxnyHkgB|.*fIDlOgKb|.*UhlWIygH|.*OtNoJxHG|.*cUTkFVIV|.*mTgFIHjr|.*jQkoIDtE|.*PPMKxRXl|.*AwMfwVkQ|.*CQyMrTQJ|.*BzrqxVSi|.*nTpcWuhF|.*PertdywG|.*ZZDgCtXN|.*WWdDPyyE|.*uVtNQsKk|.*BdeCHvPZ|.*wshRnFlH|.*aOUIitIp|.*RxZeCdXT|.*CFZMslCj|.*AVBZRDxl|.*IzIGCnhw|.*ythYuWiz|.*oztXVXhl|.*VbLkwqQx|.*qvaUgyVC|.*VawUjPWC|.*ecloYJuj|.*boCLTdSU|.*uPrKeAZx|.*hrMWLWBq|.*JOnUNHRM|.*rYnujkPq|.*dDEdZhIj|.*DRrfvugG|.*yEGfDxVV|.*YMYdJWuP|.*PHUQZNWM|.*AmKNrLis|.*zTxndVfn|.*FPsHoJnc|.*EIulZTua|.*KlAPhdzg|.*ScHJJCLt|.*NtTfMzME|.*eMCwuFdo|.*SEpJVJbR|.*cdhXZeCx|.*sAVtBwRh|.*kVFEVcMI|.*jzJrxraA|.*tGLHTell|.*NNWoeSaw|.*DcOKSetX|.*UXZAJyka|.*THpMphDP|.*rizheevl|.*kDCBRidd|.*pCZZRqyu|.*pSygkitl|.*SwZGkAaW|.*wILOrfNX|.*QkwVOerj|.*kHOMxPDr|.*EwOVycJv|.*AJvtzQFS|.*yEOjKYYB|.*LizIINLL|.*JBRSsfcG|.*YPiUqqNl|.*IsdEbvee|.*MjEpGcBm|.*OxXZVgEQ|.*xClXGuxa|.*UzRCGFEb|.*buJbvfvA|.*IPZQxRet|.*oFYShsMc|.*oBHffuHO|.*bzzKrcBR|.*KAjzrGCl|.*IPUsAVls|.*OGMUMbIU|.*gyDccHuR|.*bjlalnDd|.*ZLWjeMna|.*fdsuIlxQ|.*dVXtiomV|.*XxedTjNg|.*XWMHlNoA|.*nnyqArQX|.*opfkWGhb|.*wYtnhdYb))", + // Quest ".?". + "fo.?", + "foo.?", + "f.?o", + ".*foo.?", + ".?foo.+", + "foo.?|bar", + "ſſs", + // Concat of literals and wildcards. + ".*-.*-.*-.*-.*", + "(.+)-(.+)-(.+)-(.+)-(.+)", + "((.*))(?i:f)((.*))o((.*))o((.*))", + "((.*))f((.*))(?i:o)((.*))o((.*))", } + values = []string{ + "foo", " foo bar", "bar", "buzz\nbar", "bar foo", "bfoo", "\n", "\nfoo", "foo\n", "hello foo world", "hello foo\n world", "", + "FOO", "Foo", "fOo", "foO", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo", + "10.0.1.20", "10.0.2.10", "10.0.3.30", "10.0.4.40", + "foofoo0", "foofoo", "😀foo0", "ſſs", "ſſS", "AAAAAAAAAAAAAAAAAAAAAAAA", "BBBBBBBBBBBBBBBBBBBBBBBB", "cccccccccccccccccccccccC", "ſſſſſſſſſſſſſſſſſſſſſſſſS", "SSSSSSSSSSSSSSSSSSSSSSSSſ", - for _, c := range cases { - m, err := NewFastRegexMatcher(c.regex) - require.NoError(t, err) - require.Equal(t, c.expected, m.MatchString(c.value)) + // Values matching / not matching the test regexps on long alternations. + "zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX", + + // Invalid utf8 + "\xfefoo", + "foo\xfe", + "\xfd", + "\xff\xff", } +) + +func TestFastRegexMatcher_MatchString(t *testing.T) { + // Run the test both against a set of predefined values and a set of random ones. + testValues := append([]string{}, values...) + testValues = append(testValues, generateRandomValues()...) + + for _, r := range regexes { + r := r + for _, v := range testValues { + v := v + t.Run(readable(r)+` on "`+readable(v)+`"`, func(t *testing.T) { + t.Parallel() + m, err := NewFastRegexMatcher(r) + require.NoError(t, err) + re := regexp.MustCompile("^(?s:" + r + ")$") + require.Equal(t, re.MatchString(v), m.MatchString(v)) + }) + } + } +} + +func readable(s string) string { + const maxReadableStringLen = 40 + if len(s) < maxReadableStringLen { + return s + } + return s[:maxReadableStringLen] + "..." 
} func TestOptimizeConcatRegex(t *testing.T) { @@ -65,30 +141,33 @@ func TestOptimizeConcatRegex(t *testing.T) { regex string prefix string suffix string - contains string + contains []string }{ - {regex: "foo(hello|bar)", prefix: "foo", suffix: "", contains: ""}, - {regex: "foo(hello|bar)world", prefix: "foo", suffix: "world", contains: ""}, - {regex: "foo.*", prefix: "foo", suffix: "", contains: ""}, - {regex: "foo.*hello.*bar", prefix: "foo", suffix: "bar", contains: "hello"}, - {regex: ".*foo", prefix: "", suffix: "foo", contains: ""}, - {regex: "^.*foo$", prefix: "", suffix: "foo", contains: ""}, - {regex: ".*foo.*", prefix: "", suffix: "", contains: "foo"}, - {regex: ".*foo.*bar.*", prefix: "", suffix: "", contains: "foo"}, - {regex: ".*(foo|bar).*", prefix: "", suffix: "", contains: ""}, - {regex: ".*[abc].*", prefix: "", suffix: "", contains: ""}, - {regex: ".*((?i)abc).*", prefix: "", suffix: "", contains: ""}, - {regex: ".*(?i:abc).*", prefix: "", suffix: "", contains: ""}, - {regex: "(?i:abc).*", prefix: "", suffix: "", contains: ""}, - {regex: ".*(?i:abc)", prefix: "", suffix: "", contains: ""}, - {regex: ".*(?i:abc)def.*", prefix: "", suffix: "", contains: "def"}, - {regex: "(?i).*(?-i:abc)def", prefix: "", suffix: "", contains: "abc"}, - {regex: ".*(?msU:abc).*", prefix: "", suffix: "", contains: "abc"}, - {regex: "[aA]bc.*", prefix: "", suffix: "", contains: "bc"}, + {regex: "foo(hello|bar)", prefix: "foo", suffix: "", contains: nil}, + {regex: "foo(hello|bar)world", prefix: "foo", suffix: "world", contains: nil}, + {regex: "foo.*", prefix: "foo", suffix: "", contains: nil}, + {regex: "foo.*hello.*bar", prefix: "foo", suffix: "bar", contains: []string{"hello"}}, + {regex: ".*foo", prefix: "", suffix: "foo", contains: nil}, + {regex: "^.*foo$", prefix: "", suffix: "foo", contains: nil}, + {regex: ".*foo.*", prefix: "", suffix: "", contains: []string{"foo"}}, + {regex: ".*foo.*bar.*", prefix: "", suffix: "", contains: []string{"foo", "bar"}}, + {regex: ".*(foo|bar).*", prefix: "", suffix: "", contains: nil}, + {regex: ".*[abc].*", prefix: "", suffix: "", contains: nil}, + {regex: ".*((?i)abc).*", prefix: "", suffix: "", contains: nil}, + {regex: ".*(?i:abc).*", prefix: "", suffix: "", contains: nil}, + {regex: "(?i:abc).*", prefix: "", suffix: "", contains: nil}, + {regex: ".*(?i:abc)", prefix: "", suffix: "", contains: nil}, + {regex: ".*(?i:abc)def.*", prefix: "", suffix: "", contains: []string{"def"}}, + {regex: "(?i).*(?-i:abc)def", prefix: "", suffix: "", contains: []string{"abc"}}, + {regex: ".*(?msU:abc).*", prefix: "", suffix: "", contains: []string{"abc"}}, + {regex: "[aA]bc.*", prefix: "", suffix: "", contains: []string{"bc"}}, + {regex: "^5..$", prefix: "5", suffix: "", contains: nil}, + {regex: "^release.*", prefix: "release", suffix: "", contains: nil}, + {regex: "^env-[0-9]+laio[1]?[^0-9].*", prefix: "env-", suffix: "", contains: []string{"laio"}}, } for _, c := range cases { - parsed, err := syntax.Parse(c.regex, syntax.Perl) + parsed, err := syntax.Parse(c.regex, syntax.Perl|syntax.DotNL) require.NoError(t, err) prefix, suffix, contains := optimizeConcatRegex(parsed) @@ -98,41 +177,1219 @@ func TestOptimizeConcatRegex(t *testing.T) { } } -func BenchmarkFastRegexMatcher(b *testing.B) { - var ( - x = strings.Repeat("x", 50) - y = "foo" + x - z = x + "foo" - ) - regexes := []string{ - "foo", - "^foo", - "(foo|bar)", - "foo.*", - ".*foo", - "^.*foo$", - "^.+foo$", - ".*", - ".+", - "foo.+", - ".+foo", - ".*foo.*", - "(?i:foo)", - "(prometheus|api_prom)_api_v1_.+", 
- "((fo(bar))|.+foo)", +// Refer to https://github.com/prometheus/prometheus/issues/2651. +func TestFindSetMatches(t *testing.T) { + for _, c := range []struct { + pattern string + expMatches []string + expCaseSensitive bool + }{ + // Single value, coming from a `bar=~"foo"` selector. + {"foo", []string{"foo"}, true}, + {"^foo", []string{"foo"}, true}, + {"^foo$", []string{"foo"}, true}, + // Simple sets alternates. + {"foo|bar|zz", []string{"foo", "bar", "zz"}, true}, + // Simple sets alternate and concat (bar|baz is parsed as "ba[rz]"). + {"foo|bar|baz", []string{"foo", "bar", "baz"}, true}, + // Simple sets alternate and concat and capture + {"foo|bar|baz|(zz)", []string{"foo", "bar", "baz", "zz"}, true}, + // Simple sets alternate and concat and alternates with empty matches + // parsed as b(ar|(?:)|uzz) where b(?:) means literal b. + {"bar|b|buzz", []string{"bar", "b", "buzz"}, true}, + // Skip nested capture groups. + {"^((bar|b|buzz))$", []string{"bar", "b", "buzz"}, true}, + // Skip outer anchors (it's enforced anyway at the root). + {"^(bar|b|buzz)$", []string{"bar", "b", "buzz"}, true}, + {"^(?:prod|production)$", []string{"prod", "production"}, true}, + // Do not optimize regexp with inner anchors. + {"(bar|b|b^uz$z)", nil, false}, + // Do not optimize regexp with empty string matcher. + {"^$|Running", nil, false}, + // Simple sets containing escaped characters. + {"fo\\.o|bar\\?|\\^baz", []string{"fo.o", "bar?", "^baz"}, true}, + // using charclass + {"[abc]d", []string{"ad", "bd", "cd"}, true}, + // high low charset different => A(B[CD]|EF)|BC[XY] + {"ABC|ABD|AEF|BCX|BCY", []string{"ABC", "ABD", "AEF", "BCX", "BCY"}, true}, + // triple concat + {"api_(v1|prom)_push", []string{"api_v1_push", "api_prom_push"}, true}, + // triple concat with multiple alternates + {"(api|rpc)_(v1|prom)_push", []string{"api_v1_push", "api_prom_push", "rpc_v1_push", "rpc_prom_push"}, true}, + {"(api|rpc)_(v1|prom)_(push|query)", []string{"api_v1_push", "api_v1_query", "api_prom_push", "api_prom_query", "rpc_v1_push", "rpc_v1_query", "rpc_prom_push", "rpc_prom_query"}, true}, + // class starting with "-" + {"[-1-2][a-c]", []string{"-a", "-b", "-c", "1a", "1b", "1c", "2a", "2b", "2c"}, true}, + {"[1^3]", []string{"1", "3", "^"}, true}, + // OpPlus with concat + {"(.+)/(foo|bar)", nil, false}, + // Simple sets containing special characters without escaping. + {"fo.o|bar?|^baz", nil, false}, + // case sensitive wrapper. + {"(?i)foo", []string{"FOO"}, false}, + // case sensitive wrapper on alternate. + {"(?i)foo|bar|baz", []string{"FOO", "BAR", "BAZ", "BAr", "BAz"}, false}, + // mixed case sensitivity. + {"(api|rpc)_(v1|prom)_((?i)push|query)", nil, false}, + // mixed case sensitivity concatenation only without capture group. + {"api_v1_(?i)push", nil, false}, + // mixed case sensitivity alternation only without capture group. + {"api|(?i)rpc", nil, false}, + // case sensitive after unsetting insensitivity. + {"rpc|(?i)(?-i)api", []string{"rpc", "api"}, true}, + // case sensitive after unsetting insensitivity in all alternation options. + {"(?i)((?-i)api|(?-i)rpc)", []string{"api", "rpc"}, true}, + // mixed case sensitivity after unsetting insensitivity. 
+ {"(?i)rpc|(?-i)api", nil, false}, + // too high charset combination + {"(api|rpc)_[^0-9]", nil, false}, + // too many combinations + {"[a-z][a-z]", nil, false}, + } { + c := c + t.Run(c.pattern, func(t *testing.T) { + t.Parallel() + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) + require.NoError(t, err) + matches, actualCaseSensitive := findSetMatches(parsed) + require.Equal(t, c.expMatches, matches) + require.Equal(t, c.expCaseSensitive, actualCaseSensitive) + + if c.expCaseSensitive { + // When the regexp is case sensitive, we want to ensure that the + // set matches are maintained in the final matcher. + r, err := NewFastRegexMatcher(c.pattern) + require.NoError(t, err) + require.Equal(t, c.expMatches, r.SetMatches()) + } + }) } +} + +func TestFastRegexMatcher_SetMatches_ShouldReturnACopy(t *testing.T) { + m, err := NewFastRegexMatcher("a|b") + require.NoError(t, err) + require.Equal(t, []string{"a", "b"}, m.SetMatches()) + + // Manipulate the returned slice. + matches := m.SetMatches() + matches[0] = "xxx" + matches[1] = "yyy" + + // Ensure that if we call SetMatches() again we get the original one. + require.Equal(t, []string{"a", "b"}, m.SetMatches()) +} + +func BenchmarkFastRegexMatcher(b *testing.B) { + texts := generateRandomValues() + for _, r := range regexes { - r := r - b.Run(r, func(b *testing.B) { + b.Run(getTestNameFromRegexp(r), func(b *testing.B) { m, err := NewFastRegexMatcher(r) require.NoError(b, err) + b.ResetTimer() for i := 0; i < b.N; i++ { - _ = m.MatchString(x) - _ = m.MatchString(y) - _ = m.MatchString(z) + for _, text := range texts { + _ = m.MatchString(text) + } + } + }) + } +} + +func BenchmarkToNormalizedLower(b *testing.B) { + benchCase := func(l int, uppercase string, asciiOnly bool, alt int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + if !asciiOnly { + chars = "aаbбcвdгeдfеgёhжiзjиkйlкmлnмoнpоqпrрsсtтuуvфwхxцyчzш" + } + // Swap the alphabet to make alternatives. 
+ chars = chars[alt%len(chars):] + chars[:alt%len(chars)] + + str := strings.Repeat(chars, l/len(chars)+1)[:l] + switch uppercase { + case "first": + return strings.ToUpper(str[:1]) + str[1:] + case "last": + return str[:len(str)-1] + strings.ToUpper(str[len(str)-1:]) + case "all": + return strings.ToUpper(str) + case "none": + return str + default: + panic("invalid uppercase") + } + } + + for _, l := range []int{10, 100, 1000, 4000} { + b.Run(fmt.Sprintf("length=%d", l), func(b *testing.B) { + for _, uppercase := range []string{"none", "first", "last", "all"} { + b.Run("uppercase="+uppercase, func(b *testing.B) { + for _, asciiOnly := range []bool{true, false} { + b.Run(fmt.Sprintf("ascii=%t", asciiOnly), func(b *testing.B) { + inputs := make([]string, 10) + for i := range inputs { + inputs[i] = benchCase(l, uppercase, asciiOnly, i) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + toNormalisedLower(inputs[n%len(inputs)]) + } + }) + } + }) } }) + } +} + +func TestStringMatcherFromRegexp(t *testing.T) { + for _, c := range []struct { + pattern string + exp StringMatcher + }{ + {".*", trueMatcher{}}, + {".*?", trueMatcher{}}, + {"(?s:.*)", trueMatcher{}}, + {"(.*)", trueMatcher{}}, + {"^.*$", trueMatcher{}}, + {".+", &anyNonEmptyStringMatcher{matchNL: true}}, + {"(?s:.+)", &anyNonEmptyStringMatcher{matchNL: true}}, + {"^.+$", &anyNonEmptyStringMatcher{matchNL: true}}, + {"(.+)", &anyNonEmptyStringMatcher{matchNL: true}}, + {"", emptyStringMatcher{}}, + {"^$", emptyStringMatcher{}}, + {"^foo$", &equalStringMatcher{s: "foo", caseSensitive: true}}, + {"^(?i:foo)$", &equalStringMatcher{s: "FOO", caseSensitive: false}}, + {"^((?i:foo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})}, + {`(?i:((foo|bar)))`, orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "BAR", caseSensitive: false}})}, + {`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, + {"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})}, + {"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, + {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}}, + {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", 
"foobar"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"10\\.0\\.(1|2)\\.+", nil}, + {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{matchNL: true}, suffix: "foo", suffixCaseSensitive: true}}, + {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: trueMatcher{}}}, + {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, + {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: true}}})}, + {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: nil}}, + // we don't support case insensitive matching for contains. + // This is because there's no strings.IndexOfFold function. + // We can revisit later if this is really popular by using strings.ToUpper. + {"^(.*)((?i)foo|foobar)(.*)$", nil}, + {"(api|rpc)_(v1|prom)_((?i)push|query)", nil}, + {"[a-z][a-z]", nil}, + {"[1^3]", nil}, + {".*foo.*bar.*", nil}, + {`\d*`, nil}, + {".", nil}, + {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}}, + // This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat. + // It would make the code too complex to handle it. + {"(.+)/(foo.*|bar$)", nil}, + // Case sensitive alternate with same literal prefix and .* suffix. + {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: trueMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: trueMatcher{}}}}}, + // Case insensitive alternate with same literal prefix and .* suffix. + {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, + {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, + // Concatenated variable length selectors are not supported. 
+ {"foo.*.*", nil}, + {"foo.+.+", nil}, + {".*.*foo", nil}, + {".+.+foo", nil}, + {"aaa.?.?", nil}, + {"aaa.?.*", nil}, + // Regexps with ".?". + {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}}, + {"f.?o", nil}, + } { + c := c + t.Run(c.pattern, func(t *testing.T) { + t.Parallel() + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) + require.NoError(t, err) + matches := stringMatcherFromRegexp(parsed) + require.Equal(t, c.exp, matches) + }) + } +} + +func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { + for _, c := range []struct { + pattern string + expectedLiteralPrefixMatchers int + expectedMatches []string + expectedNotMatches []string + }{ + // Case sensitive. + { + pattern: "(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", + expectedLiteralPrefixMatchers: 3, + expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, + }, + + // Case insensitive. + { + pattern: "(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", + expectedLiteralPrefixMatchers: 3, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp"}, + }, + + // Nested literal prefixes, case sensitive. + { + pattern: "(xyz-(aaa-(111.*)|bbb-(222.*)))|(xyz-(aaa-(333.*)|bbb-(444.*)))", + expectedLiteralPrefixMatchers: 10, + expectedMatches: []string{"xyz-aaa-111", "xyz-aaa-111XXX", "xyz-aaa-333", "xyz-aaa-333XXX", "xyz-bbb-222", "xyz-bbb-222XXX", "xyz-bbb-444", "xyz-bbb-444XXX"}, + expectedNotMatches: []string{"XYZ-aaa-111", "xyz-aaa-11", "xyz-aaa-222", "xyz-bbb-111"}, + }, + + // Nested literal prefixes, case insensitive. + { + pattern: "(?i)(xyz-(aaa-(111.*)|bbb-(222.*)))|(xyz-(aaa-(333.*)|bbb-(444.*)))", + expectedLiteralPrefixMatchers: 10, + expectedMatches: []string{"xyz-aaa-111", "XYZ-aaa-111XXX", "xyz-aaa-333", "xyz-AAA-333XXX", "xyz-bbb-222", "xyz-BBB-222XXX", "XYZ-bbb-444", "xyz-bbb-444XXX"}, + expectedNotMatches: []string{"xyz-aaa-11", "xyz-aaa-222", "xyz-bbb-111"}, + }, + + // Mixed case sensitivity. + { + pattern: "(xyz-((?i)(aaa.*|bbb.*)))", + expectedLiteralPrefixMatchers: 3, + expectedMatches: []string{"xyz-aaa", "xyz-AAA", "xyz-aaaXXX", "xyz-AAAXXX", "xyz-bbb", "xyz-BBBXXX"}, + expectedNotMatches: []string{"XYZ-aaa", "xyz-aa", "yz-aaa", "aaa"}, + }, + } { + t.Run(c.pattern, func(t *testing.T) { + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) + require.NoError(t, err) + + matcher := stringMatcherFromRegexp(parsed) + require.NotNil(t, matcher) + + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") + + // Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher. 
+ numPrefixMatchers := 0 + visitStringMatcher(matcher, func(matcher StringMatcher) { + if _, ok := matcher.(*literalPrefixSensitiveStringMatcher); ok { + numPrefixMatchers++ + } + if _, ok := matcher.(*literalPrefixInsensitiveStringMatcher); ok { + numPrefixMatchers++ + } + }) + + require.Equal(t, c.expectedLiteralPrefixMatchers, numPrefixMatchers) + + for _, value := range c.expectedMatches { + require.Truef(t, matcher.Matches(value), "Value: %s", value) + + // Ensure the golang regexp engine would return the same. + require.Truef(t, re.MatchString(value), "Value: %s", value) + } + + for _, value := range c.expectedNotMatches { + require.Falsef(t, matcher.Matches(value), "Value: %s", value) + + // Ensure the golang regexp engine would return the same. + require.Falsef(t, re.MatchString(value), "Value: %s", value) + } + }) + } +} + +func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { + for _, c := range []struct { + pattern string + expectedLiteralSuffixMatchers int + expectedMatches []string + expectedNotMatches []string + }{ + // Case sensitive. + { + pattern: "(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", + expectedLiteralSuffixMatchers: 2, + expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, + }, + + // Case insensitive. + { + pattern: "(?i)(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", + expectedLiteralSuffixMatchers: 2, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp"}, + }, + + // Nested literal suffixes, case sensitive. + { + pattern: "(.*aaa|.*bbb(.*ccc|.*ddd))", + expectedLiteralSuffixMatchers: 3, + expectedMatches: []string{"aaa", "XXXaaa", "bbbccc", "XXXbbbccc", "XXXbbbXXXccc", "bbbddd", "bbbddd", "XXXbbbddd", "XXXbbbXXXddd", "bbbXXXccc", "aaabbbccc", "aaabbbddd"}, + expectedNotMatches: []string{"AAA", "aa", "Xaa", "BBBCCC", "bb", "Xbb", "bbccc", "bbbcc", "bbbdd"}, + }, + + // Mixed case sensitivity. + { + pattern: "(.*aaa|.*bbb((?i)(.*ccc|.*ddd)))", + expectedLiteralSuffixMatchers: 3, + expectedMatches: []string{"aaa", "XXXaaa", "bbbccc", "bbbCCC", "bbbXXXCCC", "bbbddd", "bbbDDD", "bbbXXXddd", "bbbXXXDDD"}, + expectedNotMatches: []string{"AAA", "XXXAAA", "BBBccc", "BBBCCC", "aaaBBB"}, + }, + } { + t.Run(c.pattern, func(t *testing.T) { + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) + require.NoError(t, err) + + matcher := stringMatcherFromRegexp(parsed) + require.NotNil(t, matcher) + + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") + + // Pre-condition check: ensure it contains literalSuffixStringMatcher. + numSuffixMatchers := 0 + visitStringMatcher(matcher, func(matcher StringMatcher) { + if _, ok := matcher.(*literalSuffixStringMatcher); ok { + numSuffixMatchers++ + } + }) + + require.Equal(t, c.expectedLiteralSuffixMatchers, numSuffixMatchers) + + for _, value := range c.expectedMatches { + require.Truef(t, matcher.Matches(value), "Value: %s", value) + + // Ensure the golang regexp engine would return the same. + require.Truef(t, re.MatchString(value), "Value: %s", value) + } + + for _, value := range c.expectedNotMatches { + require.Falsef(t, matcher.Matches(value), "Value: %s", value) + + // Ensure the golang regexp engine would return the same. 
+ require.Falsef(t, re.MatchString(value), "Value: %s", value) + } + }) + } +} + +func TestStringMatcherFromRegexp_Quest(t *testing.T) { + for _, c := range []struct { + pattern string + expectedZeroOrOneMatchers int + expectedMatches []string + expectedNotMatches []string + }{ + // Not match newline. + { + pattern: "test.?", + expectedZeroOrOneMatchers: 1, + expectedMatches: []string{"test\n", "test", "test!"}, + expectedNotMatches: []string{"tes", "test!!"}, + }, + { + pattern: ".?test", + expectedZeroOrOneMatchers: 1, + expectedMatches: []string{"\ntest", "test", "!test"}, + expectedNotMatches: []string{"tes", "test!"}, + }, + { + pattern: "(aaa.?|bbb.?)", + expectedZeroOrOneMatchers: 2, + expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX", "aaa\n", "bbb\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "bb", "bbbXX"}, + }, + { + pattern: ".*aaa.?", + expectedZeroOrOneMatchers: 1, + expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX", "XXXaaa\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX"}, + }, + + // Match newline. + { + pattern: "(?s)test.?", + expectedZeroOrOneMatchers: 1, + expectedMatches: []string{"test", "test!", "test\n"}, + expectedNotMatches: []string{"tes", "test!!", "test\n\n"}, + }, + + // Mixed flags (a part matches newline another doesn't). + { + pattern: "(aaa.?|((?s).?bbb.+))", + expectedZeroOrOneMatchers: 2, + expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX", "aaa\n"}, + expectedNotMatches: []string{"aa", "Xbbb", "\nbbb"}, + }, + } { + t.Run(c.pattern, func(t *testing.T) { + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) + require.NoError(t, err) + + matcher := stringMatcherFromRegexp(parsed) + require.NotNil(t, matcher) + + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") + + // Pre-condition check: ensure it contains zeroOrOneCharacterStringMatcher. + numZeroOrOneMatchers := 0 + visitStringMatcher(matcher, func(matcher StringMatcher) { + if _, ok := matcher.(*zeroOrOneCharacterStringMatcher); ok { + numZeroOrOneMatchers++ + } + }) + + require.Equal(t, c.expectedZeroOrOneMatchers, numZeroOrOneMatchers) + + for _, value := range c.expectedMatches { + require.Truef(t, matcher.Matches(value), "Value: %s", value) + + // Ensure the golang regexp engine would return the same. + require.Truef(t, re.MatchString(value), "Value: %s", value) + } + + for _, value := range c.expectedNotMatches { + require.Falsef(t, matcher.Matches(value), "Value: %s", value) + + // Ensure the golang regexp engine would return the same. 
+ require.Falsef(t, re.MatchString(value), "Value: %s", value) + } + }) + } +} + +func randString(randGenerator *rand.Rand, length int) string { + b := make([]rune, length) + for i := range b { + b[i] = asciiRunes[randGenerator.Intn(len(asciiRunes))] + } + return string(b) +} + +func randStrings(randGenerator *rand.Rand, many, length int) []string { + out := make([]string, 0, many) + for i := 0; i < many; i++ { + out = append(out, randString(randGenerator, length)) + } + return out +} +func randStringsWithSuffix(randGenerator *rand.Rand, many, length int, suffix string) []string { + out := randStrings(randGenerator, many, length) + for i := range out { + out[i] += suffix + } + return out +} + +func TestOptimizeEqualOrPrefixStringMatchers(t *testing.T) { + tests := map[string]struct { + input StringMatcher + expectedValues []string + expectedCaseSensitive bool + }{ + "should skip optimization on orStringMatcher with containsStringMatcher": { + input: orStringMatcher{ + &equalStringMatcher{s: "FOO", caseSensitive: true}, + &containsStringMatcher{substrings: []string{"a", "b", "c"}}, + }, + expectedValues: nil, + }, + "should run optimization on orStringMatcher with equalStringMatcher and same case sensitivity": { + input: orStringMatcher{ + &equalStringMatcher{s: "FOO", caseSensitive: true}, + &equalStringMatcher{s: "bar", caseSensitive: true}, + &equalStringMatcher{s: "baz", caseSensitive: true}, + }, + expectedValues: []string{"FOO", "bar", "baz"}, + expectedCaseSensitive: true, + }, + "should skip optimization on orStringMatcher with equalStringMatcher but different case sensitivity": { + input: orStringMatcher{ + &equalStringMatcher{s: "FOO", caseSensitive: true}, + &equalStringMatcher{s: "bar", caseSensitive: false}, + &equalStringMatcher{s: "baz", caseSensitive: true}, + }, + expectedValues: nil, + }, + "should run optimization on orStringMatcher with nested orStringMatcher and equalStringMatcher, and same case sensitivity": { + input: orStringMatcher{ + &equalStringMatcher{s: "FOO", caseSensitive: true}, + orStringMatcher{ + &equalStringMatcher{s: "bar", caseSensitive: true}, + &equalStringMatcher{s: "xxx", caseSensitive: true}, + }, + &equalStringMatcher{s: "baz", caseSensitive: true}, + }, + expectedValues: []string{"FOO", "bar", "xxx", "baz"}, + expectedCaseSensitive: true, + }, + "should skip optimization on orStringMatcher with nested orStringMatcher and equalStringMatcher, but different case sensitivity": { + input: orStringMatcher{ + &equalStringMatcher{s: "FOO", caseSensitive: true}, + orStringMatcher{ + // Case sensitivity is different within items at the same level. + &equalStringMatcher{s: "bar", caseSensitive: true}, + &equalStringMatcher{s: "xxx", caseSensitive: false}, + }, + &equalStringMatcher{s: "baz", caseSensitive: true}, + }, + expectedValues: nil, + }, + "should skip optimization on orStringMatcher with nested orStringMatcher and equalStringMatcher, but different case sensitivity in the nested one": { + input: orStringMatcher{ + &equalStringMatcher{s: "FOO", caseSensitive: true}, + // Case sensitivity is different between the parent and child. 
+ orStringMatcher{ + &equalStringMatcher{s: "bar", caseSensitive: false}, + &equalStringMatcher{s: "xxx", caseSensitive: false}, + }, + &equalStringMatcher{s: "baz", caseSensitive: true}, + }, + expectedValues: nil, + }, + "should return unchanged values on few case insensitive matchers": { + input: orStringMatcher{ + &equalStringMatcher{s: "FOO", caseSensitive: false}, + orStringMatcher{ + &equalStringMatcher{s: "bAr", caseSensitive: false}, + }, + &equalStringMatcher{s: "baZ", caseSensitive: false}, + }, + expectedValues: []string{"FOO", "bAr", "baZ"}, + expectedCaseSensitive: false, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + actualMatcher := optimizeEqualOrPrefixStringMatchers(testData.input, 0) + + if testData.expectedValues == nil { + require.IsType(t, testData.input, actualMatcher) + } else { + require.IsType(t, &equalMultiStringSliceMatcher{}, actualMatcher) + require.Equal(t, testData.expectedValues, actualMatcher.(*equalMultiStringSliceMatcher).values) + require.Equal(t, testData.expectedCaseSensitive, actualMatcher.(*equalMultiStringSliceMatcher).caseSensitive) + } + }) + } +} + +func TestNewEqualMultiStringMatcher(t *testing.T) { + tests := map[string]struct { + values []string + caseSensitivePrefixes []*literalPrefixSensitiveStringMatcher + caseSensitive bool + expectedValuesMap map[string]struct{} + expectedPrefixesMap map[string][]StringMatcher + expectedValuesList []string + }{ + "few case sensitive values": { + values: []string{"a", "B"}, + caseSensitive: true, + expectedValuesList: []string{"a", "B"}, + }, + "few case insensitive values": { + values: []string{"a", "B"}, + caseSensitive: false, + expectedValuesList: []string{"a", "B"}, + }, + "few case sensitive values and prefixes": { + values: []string{"a"}, + caseSensitivePrefixes: []*literalPrefixSensitiveStringMatcher{{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}, + caseSensitive: true, + expectedValuesMap: map[string]struct{}{"a": {}}, + expectedPrefixesMap: map[string][]StringMatcher{"B": {&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}}, + }, + "many case sensitive values": { + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, + caseSensitive: true, + expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}}, + expectedPrefixesMap: map[string][]StringMatcher{}, + }, + "many case insensitive values": { + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, + caseSensitive: false, + expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}}, + expectedPrefixesMap: map[string][]StringMatcher{}, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + // To keep this test simple, we always assume a min prefix length of 1. 
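+ // (When the test case has no prefixes, the value shouldn't matter, so 0 is passed.)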
+ minPrefixLength := 0 + if len(testData.caseSensitivePrefixes) > 0 { + minPrefixLength = 1 + } + + matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.caseSensitivePrefixes), minPrefixLength) + for _, v := range testData.values { + matcher.add(v) + } + for _, p := range testData.caseSensitivePrefixes { + matcher.addPrefix(p.prefix, true, p) + } + + if testData.expectedValuesMap != nil || testData.expectedPrefixesMap != nil { + require.IsType(t, &equalMultiStringMapMatcher{}, matcher) + require.Equal(t, testData.expectedValuesMap, matcher.(*equalMultiStringMapMatcher).values) + require.Equal(t, testData.expectedPrefixesMap, matcher.(*equalMultiStringMapMatcher).prefixes) + require.Equal(t, testData.caseSensitive, matcher.(*equalMultiStringMapMatcher).caseSensitive) + } + if testData.expectedValuesList != nil { + require.IsType(t, &equalMultiStringSliceMatcher{}, matcher) + require.Equal(t, testData.expectedValuesList, matcher.(*equalMultiStringSliceMatcher).values) + require.Equal(t, testData.caseSensitive, matcher.(*equalMultiStringSliceMatcher).caseSensitive) + } + }) + } +} + +func TestEqualMultiStringMapMatcher_addPrefix(t *testing.T) { + t.Run("should panic if the matcher is case sensitive but the prefix is not case sensitive", func(t *testing.T) { + matcher := newEqualMultiStringMatcher(true, 0, 1, 1) + + require.Panics(t, func() { + matcher.addPrefix("a", false, &literalPrefixInsensitiveStringMatcher{ + prefix: "a", + }) + }) + }) + + t.Run("should panic if the matcher is not case sensitive but the prefix is case sensitive", func(t *testing.T) { + matcher := newEqualMultiStringMatcher(false, 0, 1, 1) + + require.Panics(t, func() { + matcher.addPrefix("a", true, &literalPrefixSensitiveStringMatcher{ + prefix: "a", + }) + }) + }) +} + +func TestEqualMultiStringMatcher_Matches(t *testing.T) { + tests := map[string]struct { + values []string + prefixes []StringMatcher + caseSensitive bool + expectedMatches []string + expectedNotMatches []string + }{ + "few case sensitive values": { + values: []string{"a", "B"}, + caseSensitive: true, + expectedMatches: []string{"a", "B"}, + expectedNotMatches: []string{"A", "b"}, + }, + "few case insensitive values": { + values: []string{"a", "B"}, + caseSensitive: false, + expectedMatches: []string{"a", "A", "b", "B"}, + expectedNotMatches: []string{"c", "C"}, + }, + "few case sensitive prefixes": { + prefixes: []StringMatcher{ + &literalPrefixSensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}}, + &literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}, + }, + caseSensitive: true, + expectedMatches: []string{"a", "aX", "B", "BX"}, + expectedNotMatches: []string{"A", "b"}, + }, + "few case insensitive prefixes": { + prefixes: []StringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}}, + &literalPrefixInsensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}, + }, + caseSensitive: false, + expectedMatches: []string{"a", "aX", "A", "AX", "b", "bX", "B", "BX"}, + expectedNotMatches: []string{"c", "cX", "C", "CX"}, + }, + "many case sensitive values": { + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, + caseSensitive: true, + expectedMatches: []string{"a", "B"}, + expectedNotMatches: []string{"A", "b"}, + }, + "many case insensitive values": { + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", 
"q", "r"}, + caseSensitive: false, + expectedMatches: []string{"a", "A", "b", "B"}, + expectedNotMatches: []string{"x", "X"}, + }, + "mixed values and prefixes": { + values: []string{"a"}, + prefixes: []StringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}, + caseSensitive: true, + expectedMatches: []string{"a", "B", "BX"}, + expectedNotMatches: []string{"aX", "A", "b", "bX"}, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + // To keep this test simple, we always assume a min prefix length of 1. + minPrefixLength := 0 + if len(testData.prefixes) > 0 { + minPrefixLength = 1 + } + + matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.prefixes), minPrefixLength) + for _, v := range testData.values { + matcher.add(v) + } + for _, p := range testData.prefixes { + switch m := p.(type) { + case *literalPrefixSensitiveStringMatcher: + matcher.addPrefix(m.prefix, true, p) + case *literalPrefixInsensitiveStringMatcher: + matcher.addPrefix(m.prefix, false, p) + default: + panic("Unexpected type in test case") + } + } + + for _, v := range testData.expectedMatches { + require.True(t, matcher.Matches(v), "value: %s", v) + } + for _, v := range testData.expectedNotMatches { + require.False(t, matcher.Matches(v), "value: %s", v) + } + }) + } +} + +func TestFindEqualOrPrefixStringMatchers(t *testing.T) { + type match struct { + s string + caseSensitive bool + } + + // Utility to call findEqualOrPrefixStringMatchers() and collect all callback invocations. + findEqualOrPrefixStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) { + ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { + matches = append(matches, match{matcher.s, matcher.caseSensitive}) + return true + }, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool { + matches = append(matches, match{prefix, prefixCaseSensitive}) + return true + }) + + return + } + + t.Run("empty matcher", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(emptyStringMatcher{}) + require.False(t, actualOk) + require.Empty(t, actualMatches) + }) + + t.Run("concat of literal matchers (case sensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &equalStringMatcher{s: "test-1", caseSensitive: true}, + &equalStringMatcher{s: "test-2", caseSensitive: true}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal matchers (case insensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &equalStringMatcher{s: "test-1", caseSensitive: false}, + &equalStringMatcher{s: "test-2", caseSensitive: false}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches) + }) + + t.Run("concat of literal matchers (mixed case)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &equalStringMatcher{s: "test-1", caseSensitive: false}, + &equalStringMatcher{s: "test-2", caseSensitive: true}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal 
prefix matchers (case sensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixSensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal prefix matchers (case insensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixInsensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches) + }) + + t.Run("concat of literal prefix matchers (mixed case)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal string and prefix matchers (case sensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &equalStringMatcher{s: "test-1", caseSensitive: true}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches) + }) +} + +// This benchmark is used to find a good threshold to use to apply the optimization +// done by optimizeEqualOrPrefixStringMatchers(). +func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) { + randGenerator := rand.New(rand.NewSource(time.Now().UnixNano())) + + // Generate variable lengths random texts to match against. + texts := append([]string{}, randStrings(randGenerator, 10, 10)...) + texts = append(texts, randStrings(randGenerator, 5, 30)...) + texts = append(texts, randStrings(randGenerator, 1, 100)...) + + for numAlternations := 2; numAlternations <= 256; numAlternations *= 2 { + for _, caseSensitive := range []bool{true, false} { + for _, prefixMatcher := range []bool{true, false} { + b.Run(fmt.Sprintf("alternations: %d case sensitive: %t prefix matcher: %t", numAlternations, caseSensitive, prefixMatcher), func(b *testing.B) { + // If the test should run on prefix matchers, we add a wildcard matcher as suffix (prefix will be a literal). + suffix := "" + if prefixMatcher { + suffix = ".*" + } + + // Generate a regex with the expected number of alternations. 
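+ // Each alternative is a random 10-character literal; when prefixMatcher is set, the appended ".*" turns it into a literal prefix followed by a wildcard.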
+ re := strings.Join(randStringsWithSuffix(randGenerator, numAlternations, 10, suffix), "|") + if !caseSensitive { + re = "(?i:(" + re + "))" + } + b.Logf("regexp: %s", re) + + parsed, err := syntax.Parse(re, syntax.Perl|syntax.DotNL) + require.NoError(b, err) + + unoptimized := stringMatcherFromRegexpInternal(parsed) + require.IsType(b, orStringMatcher{}, unoptimized) + + optimized := optimizeEqualOrPrefixStringMatchers(unoptimized, 0) + if numAlternations < minEqualMultiStringMatcherMapThreshold && !prefixMatcher { + require.IsType(b, &equalMultiStringSliceMatcher{}, optimized) + } else { + require.IsType(b, &equalMultiStringMapMatcher{}, optimized) + } + + b.Run("without optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) { + for n := 0; n < b.N; n++ { + for _, t := range texts { + unoptimized.Matches(t) + } + } + }) + + b.Run("with optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) { + for n := 0; n < b.N; n++ { + for _, t := range texts { + optimized.Matches(t) + } + } + }) + }) + } + } + } +} + +func TestZeroOrOneCharacterStringMatcher(t *testing.T) { + t.Run("match newline", func(t *testing.T) { + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + require.True(t, matcher.Matches("")) + require.True(t, matcher.Matches("x")) + require.True(t, matcher.Matches("\n")) + require.False(t, matcher.Matches("xx")) + require.False(t, matcher.Matches("\n\n")) + }) + + t.Run("do not match newline", func(t *testing.T) { + matcher := &zeroOrOneCharacterStringMatcher{matchNL: false} + require.True(t, matcher.Matches("")) + require.True(t, matcher.Matches("x")) + require.False(t, matcher.Matches("\n")) + require.False(t, matcher.Matches("xx")) + require.False(t, matcher.Matches("\n\n")) + }) + + t.Run("unicode", func(t *testing.T) { + // Just for documentation purposes, emoji1 is 1 rune, emoji2 is 2 runes. + // Keeping this in mind will make it easier for future readers to fix these tests. + emoji1 := "😀" + emoji2 := "❤️" + require.Equal(t, 1, utf8.RuneCountInString(emoji1)) + require.Equal(t, 2, utf8.RuneCountInString(emoji2)) + + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + require.True(t, matcher.Matches(emoji1)) + require.False(t, matcher.Matches(emoji2)) + require.False(t, matcher.Matches(emoji1+emoji1)) + require.False(t, matcher.Matches("x"+emoji1)) + require.False(t, matcher.Matches(emoji1+"x")) + require.False(t, matcher.Matches(emoji1+emoji2)) + }) + + t.Run("invalid unicode", func(t *testing.T) { + // Just for reference, we also compare to what `^.?$` regular expression matches.
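+ // Both agree because Go's regexp decodes each invalid byte as a single utf8.RuneError rune: one invalid byte matches ".?", two invalid bytes do not.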
+ re := regexp.MustCompile("^.?$") + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + + requireMatches := func(s string, expected bool) { + t.Helper() + require.Equal(t, expected, matcher.Matches(s)) + require.Equal(t, re.MatchString(s), matcher.Matches(s)) + } + + requireMatches("\xff", true) + requireMatches("x\xff", false) + requireMatches("\xffx", false) + requireMatches("\xff\xfe", false) + }) +} + +func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) { + type benchCase struct { + str string + matches bool + } + + emoji1 := "😀" + emoji2 := "❤️" + cases := []benchCase{ + {"", true}, + {"x", true}, + {"\n", true}, + {"xx", false}, + {"\n\n", false}, + {emoji1, true}, + {emoji2, false}, + {emoji1 + emoji1, false}, + {strings.Repeat("x", 100), false}, + {strings.Repeat(emoji1, 100), false}, + {strings.Repeat(emoji2, 100), false}, + } + + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + b.ResetTimer() + + for n := 0; n < b.N; n++ { + c := cases[n%len(cases)] + got := matcher.Matches(c.str) + if got != c.matches { + b.Fatalf("unexpected result for %q: got %t, want %t", c.str, got, c.matches) + } + } +} + +func TestLiteralPrefixSensitiveStringMatcher(t *testing.T) { + m := &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}} + require.True(t, m.Matches("mar")) + require.False(t, m.Matches("marco")) + require.False(t, m.Matches("ma")) + require.False(t, m.Matches("mAr")) + + m = &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &equalStringMatcher{s: "co", caseSensitive: false}} + require.True(t, m.Matches("marco")) + require.True(t, m.Matches("marCO")) + require.False(t, m.Matches("MARco")) + require.False(t, m.Matches("mar")) + require.False(t, m.Matches("marcopracucci")) +} + +func TestLiteralPrefixInsensitiveStringMatcher(t *testing.T) { + m := &literalPrefixInsensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}} + require.True(t, m.Matches("mar")) + require.False(t, m.Matches("marco")) + require.False(t, m.Matches("ma")) + require.True(t, m.Matches("mAr")) +} + +func TestLiteralSuffixStringMatcher(t *testing.T) { + m := &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: true} + require.True(t, m.Matches("co")) + require.False(t, m.Matches("marco")) + require.False(t, m.Matches("coo")) + require.False(t, m.Matches("Co")) + + m = &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: false} + require.True(t, m.Matches("co")) + require.False(t, m.Matches("marco")) + require.False(t, m.Matches("coo")) + require.True(t, m.Matches("Co")) + + m = &literalSuffixStringMatcher{left: &equalStringMatcher{s: "mar", caseSensitive: false}, suffix: "co", suffixCaseSensitive: true} + require.True(t, m.Matches("marco")) + require.True(t, m.Matches("MARco")) + require.False(t, m.Matches("marCO")) + require.False(t, m.Matches("mar")) + require.False(t, m.Matches("marcopracucci")) + + m = &literalSuffixStringMatcher{left: &equalStringMatcher{s: "mar", caseSensitive: false}, suffix: "co", suffixCaseSensitive: false} + require.True(t, m.Matches("marco")) + require.True(t, m.Matches("MARco")) + require.True(t, m.Matches("marCO")) + require.False(t, m.Matches("mar")) + require.False(t, m.Matches("marcopracucci")) +} + +func TestHasPrefixCaseInsensitive(t *testing.T) { + require.True(t, hasPrefixCaseInsensitive("marco", "mar")) + require.True(t, hasPrefixCaseInsensitive("mArco", "mar")) + require.True(t, hasPrefixCaseInsensitive("marco", "MaR")) + 
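// A prefix as long as the whole string still counts as a prefix. +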
require.True(t, hasPrefixCaseInsensitive("marco", "marco")) + require.True(t, hasPrefixCaseInsensitive("mArco", "marco")) + + require.False(t, hasPrefixCaseInsensitive("marco", "a")) + require.False(t, hasPrefixCaseInsensitive("marco", "abcdefghi")) +} + +func TestHasSuffixCaseInsensitive(t *testing.T) { + require.True(t, hasSuffixCaseInsensitive("marco", "rco")) + require.True(t, hasSuffixCaseInsensitive("marco", "RcO")) + require.True(t, hasSuffixCaseInsensitive("marco", "marco")) + require.False(t, hasSuffixCaseInsensitive("marco", "a")) + require.False(t, hasSuffixCaseInsensitive("marco", "abcdefghi")) +} + +func TestContainsInOrder(t *testing.T) { + require.True(t, containsInOrder("abcdefghilmno", []string{"ab", "cd", "no"})) + require.True(t, containsInOrder("abcdefghilmno", []string{"def", "hil"})) + + require.False(t, containsInOrder("abcdefghilmno", []string{"ac"})) + require.False(t, containsInOrder("abcdefghilmno", []string{"ab", "cd", "de"})) + require.False(t, containsInOrder("abcdefghilmno", []string{"cd", "ab"})) +} + +func getTestNameFromRegexp(re string) string { + if len(re) > 32 { + return re[:32] + } + return re +} + +func generateRandomValues() []string { + // Init the random seed with a constant, so that it doesn't change between runs. + randGenerator := rand.New(rand.NewSource(1)) + + // Generate variable-length random texts to match against. + texts := append([]string{}, randStrings(randGenerator, 10, 10)...) + texts = append(texts, randStrings(randGenerator, 5, 30)...) + texts = append(texts, randStrings(randGenerator, 1, 100)...) + texts = append(texts, "foo"+randString(randGenerator, 50)) + texts = append(texts, randString(randGenerator, 50)+"foo") + + return texts +} + +func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatcher)) { + callback(matcher) + + switch casted := matcher.(type) { + case *containsStringMatcher: + if casted.left != nil { + visitStringMatcher(casted.left, callback) + } + if casted.right != nil { + visitStringMatcher(casted.right, callback) + } + + case *literalPrefixSensitiveStringMatcher: + visitStringMatcher(casted.right, callback) + + case *literalPrefixInsensitiveStringMatcher: + visitStringMatcher(casted.right, callback) + + case *literalSuffixStringMatcher: + visitStringMatcher(casted.left, callback) + + case orStringMatcher: + for _, entry := range casted { + visitStringMatcher(entry, callback) + } +
+ case *equalMultiStringMapMatcher: + for _, prefixes := range casted.prefixes { + for _, matcher := range prefixes { + visitStringMatcher(matcher, callback) + } + } + + // No nested matchers for the following ones. + case emptyStringMatcher: + case *equalStringMatcher: + case *equalMultiStringSliceMatcher: + case anyStringWithoutNewlineMatcher: + case *anyNonEmptyStringMatcher: + case trueMatcher: + } +} + +func TestToNormalisedLower(t *testing.T) { + testCases := map[string]string{ + "foo": "foo", + "FOO": "foo", + "Foo": "foo", + "foO": "foo", + "fOo": "foo", + "AAAAAAAAAAAAAAAAAAAAAAAA": "aaaaaaaaaaaaaaaaaaaaaaaa", + "cccccccccccccccccccccccC": "cccccccccccccccccccccccc", + "ſſſſſſſſſſſſſſſſſſſſſſſſS": "sssssssssssssssssssssssss", + "ſſAſſa": "ssassa", + } + for input, expectedOutput := range testCases { + require.Equal(t, expectedOutput, toNormalisedLower(input)) } } diff --git a/model/labels/sharding.go b/model/labels/sharding.go new file mode 100644 index 0000000000..8b3a369397 --- /dev/null +++ b/model/labels/sharding.go @@ -0,0 +1,47 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !stringlabels && !dedupelabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, sep) + b = append(b, v.Value...) + b = append(b, sep) + } + return xxhash.Sum64(b) +} diff --git a/model/labels/sharding_dedupelabels.go b/model/labels/sharding_dedupelabels.go new file mode 100644 index 0000000000..5bf41b05d6 --- /dev/null +++ b/model/labels/sharding_dedupelabels.go @@ -0,0 +1,52 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build dedupelabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time.
+// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for pos := 0; pos < len(ls.data); { + name, newPos := decodeString(ls.syms, ls.data, pos) + value, newPos := decodeString(ls.syms, ls.data, newPos) + if len(b)+len(name)+len(value)+2 >= cap(b) { + // If labels entry is 1KB+, hash the rest of them via Write(). + h := xxhash.New() + _, _ = h.Write(b) + for pos < len(ls.data) { + name, pos = decodeString(ls.syms, ls.data, pos) + value, pos = decodeString(ls.syms, ls.data, pos) + _, _ = h.WriteString(name) + _, _ = h.Write(seps) + _, _ = h.WriteString(value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + pos = newPos + } + return xxhash.Sum64(b) +} diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go new file mode 100644 index 0000000000..798f268eb9 --- /dev/null +++ b/model/labels/sharding_stringlabels.go @@ -0,0 +1,54 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build stringlabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + var h *xxhash.Digest + for i := 0; i < len(ls.data); { + var v Label + v.Name, i = decodeString(ls.data, i) + v.Value, i = decodeString(ls.data, i) + if h == nil && len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+, switch to Write API. Copy in the values up to this point. + h = xxhash.New() + _, _ = h.Write(b) + } + if h != nil { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + continue + } + + b = append(b, v.Name...) + b = append(b, sep) + b = append(b, v.Value...) + b = append(b, sep) + } + if h != nil { + return h.Sum64() + } + return xxhash.Sum64(b) +} diff --git a/model/labels/sharding_test.go b/model/labels/sharding_test.go new file mode 100644 index 0000000000..78e3047509 --- /dev/null +++ b/model/labels/sharding_test.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestStableHash tests that StableHash is stable. +// The hashes this test asserts should not be changed. +func TestStableHash(t *testing.T) { + for expectedHash, lbls := range map[uint64]Labels{ + 0xef46db3751d8e999: EmptyLabels(), + 0x347c8ee7a9e29708: FromStrings("hello", "world"), + 0xcbab40540f26097d: FromStrings(MetricName, "metric", "label", "value"), + } { + require.Equal(t, expectedHash, StableHash(lbls)) + } +} diff --git a/model/labels/test_utils.go b/model/labels/test_utils.go index 05b8168825..d060def481 100644 --- a/model/labels/test_utils.go +++ b/model/labels/test_utils.go @@ -50,7 +50,7 @@ func ReadLabels(fn string, n int) ([]Labels, error) { defer f.Close() scanner := bufio.NewScanner(f) - b := ScratchBuilder{} + b := NewScratchBuilder(0) var mets []Labels hashes := map[uint64]struct{}{} diff --git a/model/metadata/metadata.go b/model/metadata/metadata.go index f227af0b95..1b7e63e0f3 100644 --- a/model/metadata/metadata.go +++ b/model/metadata/metadata.go @@ -13,11 +13,11 @@ package metadata -import "github.com/prometheus/prometheus/model/textparse" +import "github.com/prometheus/common/model" // Metadata stores a series' metadata information. type Metadata struct { - Type textparse.MetricType - Unit string - Help string + Type model.MetricType `json:"type"` + Unit string `json:"unit"` + Help string `json:"help"` } diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index fadf35b867..eb79f7be21 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -17,6 +17,7 @@ import ( "crypto/md5" "encoding/binary" "fmt" + "strconv" "strings" "github.com/grafana/regexp" @@ -48,7 +49,7 @@ const ( Drop Action = "drop" // KeepEqual drops targets for which the input does not match the target. KeepEqual Action = "keepequal" - // Drop drops targets for which the input does match the target. + // DropEqual drops targets for which the input does match the target. DropEqual Action = "dropequal" // HashMod sets a label to the modulus of a hash of labels. 
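// For example, with Modulus: 4, a source value whose truncated hash is 7
// gets target label value "3" (7 % 4); the formatting moves from
// fmt.Sprintf("%d", mod) to strconv.FormatUint(mod, 10) in a hunk below.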
HashMod Action = "hashmod" @@ -108,6 +109,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Regex.Regexp == nil { c.Regex = MustNewRegexp("") } + return c.Validate() +} + +func (c *Config) Validate() error { if c.Action == "" { return fmt.Errorf("relabel action cannot be empty") } @@ -117,7 +122,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !relabelTarget.MatchString(c.TargetLabel) { + if c.Action == Replace && !strings.Contains(c.TargetLabel, "$") && !model.LabelName(c.TargetLabel).IsValid() { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if c.Action == Replace && strings.Contains(c.TargetLabel, "$") && !relabelTarget.MatchString(c.TargetLabel) { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { @@ -160,7 +171,7 @@ type Regexp struct { // NewRegexp creates a new anchored Regexp and returns an error if the // passed-in regular expression does not compile. func NewRegexp(s string) (Regexp, error) { - regex, err := regexp.Compile("^(?:" + s + ")$") + regex, err := regexp.Compile("^(?s:" + s + ")$") return Regexp{Regexp: regex}, err } @@ -195,11 +206,20 @@ func (re Regexp) MarshalYAML() (interface{}, error) { return nil, nil } +// IsZero implements the yaml.IsZeroer interface. +func (re Regexp) IsZero() bool { + return re.Regexp == DefaultRelabelConfig.Regex.Regexp +} + // String returns the original string used to compile the regular expression. func (re Regexp) String() string { + if re.Regexp == nil { + return "" + } + str := re.Regexp.String() - // Trim the anchor `^(?:` prefix and `)$` suffix. - return str[4 : len(str)-2] + // Trim the anchor `^(?s:` prefix and `)$` suffix. + return str[5 : len(str)-2] } // Process returns a relabeled version of the given label set. The relabel configurations @@ -257,6 +277,13 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. + if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + lb.Set(cfg.TargetLabel, cfg.Replacement) + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. 
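// A sketch of the fast path above (field values assumed for illustration):
// with no source_labels, the default regex, and no "$" in target_label or
// replacement, the rule reduces to a plain Set with no regex evaluation:
//
//	cfg := &Config{Action: Replace, TargetLabel: "env",
//		Replacement: "prod", Regex: DefaultRelabelConfig.Regex}
//	// relabel(cfg, lb) now just calls lb.Set("env", "prod").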
if indexes == nil { @@ -264,12 +291,11 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { } target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes)) if !target.IsValid() { - lb.Del(cfg.TargetLabel) break } res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes) if len(res) == 0 { - lb.Del(cfg.TargetLabel) + lb.Del(string(target)) break } lb.Set(string(target), string(res)) @@ -281,7 +307,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { hash := md5.Sum([]byte(val)) // Use only the last 8 bytes of the hash to give the same result as earlier versions of this code. mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus - lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) + lb.Set(cfg.TargetLabel, strconv.FormatUint(mod, 10)) case LabelMap: lb.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { @@ -307,3 +333,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index b50ff4010a..0c6d41f5e3 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -14,6 +14,7 @@ package relabel import ( + "strconv" "testing" "github.com/prometheus/common/model" @@ -21,6 +22,7 @@ import ( "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/testutil" ) func TestRelabel(t *testing.T) { @@ -213,6 +215,25 @@ func TestRelabel(t *testing.T) { "a": "boo", }), }, + { + // Blank replacement should delete the label. + input: labels.FromMap(map[string]string{ + "a": "foo", + "f": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("(f).*"), + TargetLabel: "$1", + Replacement: "$2", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "foo", + }), + }, { input: labels.FromMap(map[string]string{ "a": "foo", @@ -334,7 +355,7 @@ func TestRelabel(t *testing.T) { }, { // invalid target_labels input: labels.FromMap(map[string]string{ - "a": "some-name-value", + "a": "some-name-0", }), relabel: []*Config{ { @@ -349,18 +370,18 @@ func TestRelabel(t *testing.T) { Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), Action: Replace, Replacement: "${1}", - TargetLabel: "0${3}", + TargetLabel: "${3}", }, { SourceLabels: model.LabelNames{"a"}, - Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Regex: MustNewRegexp("some-([^-]+)(-[^,]+)"), Action: Replace, Replacement: "${1}", - TargetLabel: "-${3}", + TargetLabel: "${3}", }, }, output: labels.FromMap(map[string]string{ - "a": "some-name-value", + "a": "some-name-0", }), }, { // more complex real-life like usecase @@ -548,6 +569,29 @@ func TestRelabel(t *testing.T) { }, drop: true, }, + { + input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "d", + Separator: ";", + Replacement: "match${1}", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "d": "match", + }), + }, } for _, test := range tests { @@ -565,16 +609,88 @@ func TestRelabel(t *testing.T) { if cfg.Replacement == "" { cfg.Replacement = DefaultRelabelConfig.Replacement } + require.NoError(t, cfg.Validate()) } res, keep := Process(test.input, test.relabel...) 
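// testutil.RequireEqual (used just below in place of require.Equal) compares
// label sets via their own equality semantics, so the assertion holds under
// each labels build variant touched by this diff, not only the slice-backed
// default implementation.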
require.Equal(t, !test.drop, keep) if keep { - require.Equal(t, test.output, res) + testutil.RequireEqual(t, test.output, res) } } } +func TestRelabelValidate(t *testing.T) { + tests := []struct { + config Config + expected string + }{ + { + config: Config{}, + expected: `relabel action cannot be empty`, + }, + { + config: Config{ + Action: Replace, + }, + expected: `requires 'target_label' value`, + }, + { + config: Config{ + Action: Lowercase, + }, + expected: `requires 'target_label' value`, + }, + { + config: Config{ + Action: Lowercase, + Replacement: DefaultRelabelConfig.Replacement, + TargetLabel: "${3}", + }, + expected: `"${3}" is invalid 'target_label'`, + }, + { + config: Config{ + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, + Replacement: "${1}", + TargetLabel: "${3}", + }, + }, + { + config: Config{ + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, + Replacement: "${1}", + TargetLabel: "0${3}", + }, + expected: `"0${3}" is invalid 'target_label'`, + }, + { + config: Config{ + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, + Replacement: "${1}", + TargetLabel: "-${3}", + }, + expected: `"-${3}" is invalid 'target_label' for replace action`, + }, + } + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + err := test.config.Validate() + if test.expected == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, test.expected) + } + }) + } +} + func TestTargetLabelValidity(t *testing.T) { tests := []struct { str string @@ -745,6 +861,34 @@ func BenchmarkRelabel(b *testing.B) { "__scrape_timeout__", "10s", "job", "kubernetes-pods"), }, + { + name: "static label pair", + config: ` + - replacement: wwwwww + target_label: wwwwww + - replacement: yyyyyyyyyyyy + target_label: xxxxxxxxx + - replacement: xxxxxxxxx + target_label: yyyyyyyyyyyy + - source_labels: ["something"] + target_label: with_source_labels + replacement: value + - replacement: dropped + target_label: ${0} + - replacement: ${0} + target_label: dropped`, + lbls: labels.FromStrings( + "abcdefg01", "hijklmn1", + "abcdefg02", "hijklmn2", + "abcdefg03", "hijklmn3", + "abcdefg04", "hijklmn4", + "abcdefg05", "hijklmn5", + "abcdefg06", "hijklmn6", + "abcdefg07", "hijklmn7", + "abcdefg08", "hijklmn8", + "job", "foo", + ), + }, } for i := range tests { err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs) @@ -758,3 +902,65 @@ func BenchmarkRelabel(b *testing.B) { }) } } + +func TestConfig_UnmarshalThenMarshal(t *testing.T) { + tests := []struct { + name string + inputYaml string + }{ + { + name: "Values provided", + inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port] +separator: ; +regex: \\d+ +target_label: __meta_kubernetes_pod_container_port_number +replacement: $1 +action: replace +`, + }, + { + name: "No regex provided", + inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port] +separator: ; +target_label: __meta_kubernetes_pod_container_port_number +replacement: $1 +action: keepequal +`, + }, + { + name: "Default regex provided", + inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port] +separator: ; +regex: (.*) +target_label: __meta_kubernetes_pod_container_port_number +replacement: $1 +action: replace +`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + unmarshalled := Config{} + err 
:= yaml.Unmarshal([]byte(test.inputYaml), &unmarshalled) + require.NoError(t, err) + + marshalled, err := yaml.Marshal(&unmarshalled) + require.NoError(t, err) + + require.Equal(t, test.inputYaml, string(marshalled)) + }) + } +} + +func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) { + var zero Regexp + + marshalled, err := yaml.Marshal(&zero) + require.NoError(t, err) + require.Equal(t, "null\n", string(marshalled)) + + var unmarshalled Regexp + err = yaml.Unmarshal(marshalled, &unmarshalled) + require.NoError(t, err) + require.Nil(t, unmarshalled.Regexp) +} diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index 30b3face0d..bfb85ce740 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -27,6 +27,7 @@ import ( "gopkg.in/yaml.v3" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/template" ) @@ -135,10 +136,11 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { // RuleGroup is a list of sequentially evaluated recording and alerting rules. type RuleGroup struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + QueryOffset *model.Duration `yaml:"query_offset,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` } // Rule describes an alerting or recording rule. @@ -173,17 +175,11 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { }) } if r.Record.Value == "" && r.Alert.Value == "" { - if r.Record.Value == "0" { - nodes = append(nodes, WrappedError{ - err: fmt.Errorf("one of 'record' or 'alert' must be set"), - node: &r.Alert, - }) - } else { - nodes = append(nodes, WrappedError{ - err: fmt.Errorf("one of 'record' or 'alert' must be set"), - node: &r.Record, - }) - } + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("one of 'record' or 'alert' must be set"), + node: &r.Record, + nodeAlt: &r.Alert, + }) } if r.Expr.Value == "" { @@ -262,7 +258,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { } // Trying to parse templates. 
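// Note the signature change just below: the last argument of
// template.AlertTemplateData is now a promql.Sample rather than a bare
// float value, so a call becomes (arguments assumed for illustration):
//
//	tmplData := template.AlertTemplateData(labelsMap, externalLabelsMap, "", promql.Sample{})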
- tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, "", 0) + tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, "", promql.Sample{}) defs := []string{ "{{$labels := .Labels}}", "{{$externalLabels := .ExternalLabels}}", diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index d6499538e2..ef5008f4bf 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -193,10 +193,10 @@ groups: _, errs := Parse([]byte(group)) require.Len(t, errs, 2, "Expected two errors") var err00 *Error - require.True(t, errors.As(errs[0], &err00)) + require.ErrorAs(t, errs[0], &err00) err0 := err00.Err.node var err01 *Error - require.True(t, errors.As(errs[1], &err01)) + require.ErrorAs(t, errs[1], &err01) err1 := err01.Err.node require.NotEqual(t, err0, err1, "Error nodes should not be the same") } diff --git a/model/rulefmt/testdata/bad_field.bad.yaml b/model/rulefmt/testdata/bad_field.bad.yaml index d85eab1e5f..729bbadfbc 100644 --- a/model/rulefmt/testdata/bad_field.bad.yaml +++ b/model/rulefmt/testdata/bad_field.bad.yaml @@ -6,4 +6,4 @@ groups: labels: instance: localhost annotation: - summary: annonations is written without s above + summary: annotations is written without s above diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 38903afc96..7de88a4869 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -16,6 +16,8 @@ package textparse import ( "mime" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -42,7 +44,7 @@ type Parser interface { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. - Type() ([]byte, MetricType) + Type() ([]byte, model.MetricType) // Unit returns the metric name and unit in the current entry. // Must only be called after Next returned a unit entry. @@ -64,8 +66,13 @@ type Parser interface { // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool - // Next advances the parser to the next sample. It returns false if no - // more samples were read or an error occurred. + // CreatedTimestamp returns the created timestamp (in milliseconds) for the + // current sample. It returns nil if it is unknown e.g. if it wasn't set, + // if the scrape protocol or metric type does not support created timestamps. + CreatedTimestamp() *int64 + + // Next advances the parser to the next sample. + // It returns (EntryInvalid, io.EOF) if no samples were read. Next() (Entry, error) } @@ -73,22 +80,24 @@ type Parser interface { // // This function always returns a valid parser, but might additionally // return an error if the content type cannot be parsed. 
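// With the signature change below, callers now pass a shared symbol table
// and the OpenMetrics created-timestamp switch explicitly, e.g. (buffer and
// content type assumed for illustration):
//
//	st := labels.NewSymbolTable()
//	p, err := textparse.New(buf, contentType,
//		false, // parseClassicHistograms
//		true,  // skipOMCTSeries
//		st)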
-func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) { +func New(b []byte, contentType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) { if contentType == "" { - return NewPromParser(b), nil + return NewPromParser(b, st), nil } mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { - return NewPromParser(b), err + return NewPromParser(b, st), err } switch mediaType { case "application/openmetrics-text": - return NewOpenMetricsParser(b), nil + return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { + o.SkipCTSeries = skipOMCTSeries + }), nil case "application/vnd.google.protobuf": - return NewProtobufParser(b, parseClassicHistograms), nil + return NewProtobufParser(b, parseClassicHistograms, st), nil default: - return NewPromParser(b), nil + return NewPromParser(b, st), nil } } @@ -99,22 +108,8 @@ const ( EntryInvalid Entry = -1 EntryType Entry = 0 EntryHelp Entry = 1 - EntrySeries Entry = 2 // A series with a simple float64 as value. + EntrySeries Entry = 2 // EntrySeries marks a series with a simple float64 as value. EntryComment Entry = 3 EntryUnit Entry = 4 - EntryHistogram Entry = 5 // A series with a native histogram as a value. -) - -// MetricType represents metric type values. -type MetricType string - -const ( - MetricTypeCounter = MetricType("counter") - MetricTypeGauge = MetricType("gauge") - MetricTypeHistogram = MetricType("histogram") - MetricTypeGaugeHistogram = MetricType("gaugehistogram") - MetricTypeSummary = MetricType("summary") - MetricTypeInfo = MetricType("info") - MetricTypeStateset = MetricType("stateset") - MetricTypeUnknown = MetricType("unknown") + EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value. ) diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index de140d6819..970b96706e 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -17,6 +17,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" ) func TestNewParser(t *testing.T) { @@ -91,7 +93,7 @@ func TestNewParser(t *testing.T) { tt := tt // Copy to local variable before going parallel. t.Parallel() - p, err := New([]byte{}, tt.contentType, false) + p, err := New([]byte{}, tt.contentType, false, false, labels.NewSymbolTable()) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) diff --git a/model/textparse/omtestdata.txt b/model/textparse/omtestdata.txt new file mode 100644 index 0000000000..0f5f78b8b9 --- /dev/null +++ b/model/textparse/omtestdata.txt @@ -0,0 +1,64 @@ +# HELP go_build_info Build information about the main Go module. +# TYPE go_build_info gauge +go_build_info{checksum="",path="",version=""} 1.0 +# HELP promhttp_metric_handler_errors Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors counter +promhttp_metric_handler_errors_total{cause="encoding"} 0.0 +promhttp_metric_handler_errors_created{cause="encoding"} 1.726839813016397e+09 +promhttp_metric_handler_errors_total{cause="gathering"} 0.0 +promhttp_metric_handler_errors_created{cause="gathering"} 1.726839813016395e+09 +# HELP rpc_durations_histogram_seconds RPC latency distributions. 
+# TYPE rpc_durations_histogram_seconds histogram +rpc_durations_histogram_seconds_bucket{le="-0.00099"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.00089"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0007899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0006899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0005899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0004899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0003899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0002899999999999998"} 3 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09 +rpc_durations_histogram_seconds_bucket{le="-0.0001899999999999998"} 5 # {dummyID="84741"} -0.00020178290006788965 1.726839814829977e+09 +rpc_durations_histogram_seconds_bucket{le="-8.999999999999979e-05"} 5 +rpc_durations_histogram_seconds_bucket{le="1.0000000000000216e-05"} 8 # {dummyID="19206"} -4.6156147425468016e-05 1.7268398151337721e+09 +rpc_durations_histogram_seconds_bucket{le="0.00011000000000000022"} 9 # {dummyID="3974"} 9.528436760156754e-05 1.726839814526797e+09 +rpc_durations_histogram_seconds_bucket{le="0.00021000000000000023"} 11 # {dummyID="29640"} 0.00017459624183458996 1.7268398139220061e+09 +rpc_durations_histogram_seconds_bucket{le="0.0003100000000000002"} 15 # {dummyID="9818"} 0.0002791130914009552 1.7268398149821382e+09 +rpc_durations_histogram_seconds_bucket{le="0.0004100000000000002"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0005100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0006100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0007100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0008100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0009100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="+Inf"} 15 +rpc_durations_histogram_seconds_sum -8.452185437166741e-05 +rpc_durations_histogram_seconds_count 15 +rpc_durations_histogram_seconds_created 1.726839813016302e+09 +# HELP rpc_durations_seconds RPC latency distributions. +# TYPE rpc_durations_seconds summary +rpc_durations_seconds{service="exponential",quantile="0.5"} 7.689368882420941e-07 +rpc_durations_seconds{service="exponential",quantile="0.9"} 1.6537614174305048e-06 +rpc_durations_seconds{service="exponential",quantile="0.99"} 2.0965499063061924e-06 +rpc_durations_seconds_sum{service="exponential"} 2.0318666372575776e-05 +rpc_durations_seconds_count{service="exponential"} 22 +rpc_durations_seconds_created{service="exponential"} 1.7268398130168908e+09 +rpc_durations_seconds{service="normal",quantile="0.5"} -5.066758674917046e-06 +rpc_durations_seconds{service="normal",quantile="0.9"} 0.0002935723711788224 +rpc_durations_seconds{service="normal",quantile="0.99"} 0.0003023094636293776 +rpc_durations_seconds_sum{service="normal"} -8.452185437166741e-05 +rpc_durations_seconds_count{service="normal"} 15 +rpc_durations_seconds_created{service="normal"} 1.726839813016714e+09 +rpc_durations_seconds{service="uniform",quantile="0.5"} 9.005014931474918e-05 +rpc_durations_seconds{service="uniform",quantile="0.9"} 0.00017801230208182325 +rpc_durations_seconds{service="uniform",quantile="0.99"} 0.00018641524538180192 +rpc_durations_seconds_sum{service="uniform"} 0.0011666095700533677 +rpc_durations_seconds_count{service="uniform"} 11 +rpc_durations_seconds_created{service="uniform"} 1.72683981301684e+09 +# HELP rpc_requests Total number of RPC requests received. 
+# TYPE rpc_requests counter +rpc_requests_total{service="exponential"} 22.0 +rpc_requests_created{service="exponential"} 1.726839813016893e+09 +rpc_requests_total{service="normal"} 15.0 +rpc_requests_created{service="normal"} 1.726839813016717e+09 +rpc_requests_total{service="uniform"} 11.0 +rpc_requests_created{service="uniform"} 1.7268398130168471e+09 +# EOF diff --git a/model/textparse/openmetricslex.l b/model/textparse/openmetricslex.l index 91e4439423..9afbbbd8bd 100644 --- a/model/textparse/openmetricslex.l +++ b/model/textparse/openmetricslex.l @@ -50,12 +50,15 @@ S [ ] TYPE{S} l.state = sMeta1; return tType UNIT{S} l.state = sMeta1; return tUnit "EOF"\n? l.state = sInit; return tEOFWord +\"(\\.|[^\\"])*\" l.state = sMeta2; return tMName {M}({M}|{D})* l.state = sMeta2; return tMName {S}{C}*\n l.state = sInit; return tText {M}({M}|{D})* l.state = sValue; return tMName \{ l.state = sLabels; return tBraceOpen +\{ l.state = sLabels; return tBraceOpen {L}({L}|{D})* return tLName +\"(\\.|[^\\"])*\" l.state = sLabels; return tQString \} l.state = sValue; return tBraceClose = l.state = sLValue; return tEqual , return tComma diff --git a/model/textparse/openmetricslex.l.go b/model/textparse/openmetricslex.l.go index 9f17cfd436..c8789ef60d 100644 --- a/model/textparse/openmetricslex.l.go +++ b/model/textparse/openmetricslex.l.go @@ -37,25 +37,25 @@ yystate0: case 0: // start condition: INITIAL goto yystart1 case 1: // start condition: sComment - goto yystart5 + goto yystart6 case 2: // start condition: sMeta1 - goto yystart25 + goto yystart26 case 3: // start condition: sMeta2 - goto yystart27 + goto yystart31 case 4: // start condition: sLabels - goto yystart30 + goto yystart34 case 5: // start condition: sLValue - goto yystart35 + goto yystart42 case 6: // start condition: sValue - goto yystart39 + goto yystart46 case 7: // start condition: sTimestamp - goto yystart43 - case 8: // start condition: sExemplar goto yystart50 + case 8: // start condition: sExemplar + goto yystart57 case 9: // start condition: sEValue - goto yystart55 + goto yystart62 case 10: // start condition: sETimestamp - goto yystart61 + goto yystart68 } yystate1: @@ -68,6 +68,8 @@ yystart1: goto yystate2 case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': goto yystate4 + case c == '{': + goto yystate5 } yystate2: @@ -87,514 +89,574 @@ yystate4: c = l.next() switch { default: - goto yyrule8 + goto yyrule9 case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': goto yystate4 } yystate5: c = l.next() -yystart5: + goto yyrule11 + +yystate6: + c = l.next() +yystart6: switch { default: goto yyabort case c == 'E': - goto yystate6 + goto yystate7 case c == 'H': - goto yystate10 + goto yystate11 case c == 'T': - goto yystate15 + goto yystate16 case c == 'U': - goto yystate20 + goto yystate21 } -yystate6: +yystate7: c = l.next() switch { default: goto yyabort case c == 'O': - goto yystate7 + goto yystate8 } -yystate7: +yystate8: c = l.next() switch { default: goto yyabort case c == 'F': - goto yystate8 + goto yystate9 } -yystate8: +yystate9: c = l.next() switch { default: goto yyrule5 case c == '\n': - goto yystate9 + goto yystate10 } -yystate9: +yystate10: c = l.next() goto yyrule5 -yystate10: +yystate11: c = l.next() switch { default: goto yyabort case c == 'E': - goto yystate11 + goto yystate12 } -yystate11: +yystate12: c = l.next() switch { default: goto yyabort case c == 'L': - goto yystate12 + goto yystate13 } -yystate12: +yystate13: c = l.next() switch { default: goto 
yyabort case c == 'P': - goto yystate13 + goto yystate14 } -yystate13: +yystate14: c = l.next() switch { default: goto yyabort case c == ' ': - goto yystate14 + goto yystate15 } -yystate14: +yystate15: c = l.next() goto yyrule2 -yystate15: +yystate16: c = l.next() switch { default: goto yyabort case c == 'Y': - goto yystate16 + goto yystate17 } -yystate16: +yystate17: c = l.next() switch { default: goto yyabort case c == 'P': - goto yystate17 + goto yystate18 } -yystate17: +yystate18: c = l.next() switch { default: goto yyabort case c == 'E': - goto yystate18 + goto yystate19 } -yystate18: +yystate19: c = l.next() switch { default: goto yyabort case c == ' ': - goto yystate19 + goto yystate20 } -yystate19: +yystate20: c = l.next() goto yyrule3 -yystate20: +yystate21: c = l.next() switch { default: goto yyabort case c == 'N': - goto yystate21 + goto yystate22 } -yystate21: +yystate22: c = l.next() switch { default: goto yyabort case c == 'I': - goto yystate22 + goto yystate23 } -yystate22: +yystate23: c = l.next() switch { default: goto yyabort case c == 'T': - goto yystate23 + goto yystate24 } -yystate23: +yystate24: c = l.next() switch { default: goto yyabort case c == ' ': - goto yystate24 + goto yystate25 } -yystate24: +yystate25: c = l.next() goto yyrule4 -yystate25: +yystate26: c = l.next() -yystart25: +yystart26: switch { default: goto yyabort + case c == '"': + goto yystate27 case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate26 + goto yystate30 } -yystate26: +yystate27: c = l.next() switch { default: - goto yyrule6 + goto yyabort + case c == '"': + goto yystate28 + case c == '\\': + goto yystate29 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate27 + } + +yystate28: + c = l.next() + goto yyrule6 + +yystate29: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate27 + } + +yystate30: + c = l.next() + switch { + default: + goto yyrule7 case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate26 + goto yystate30 } -yystate27: +yystate31: c = l.next() -yystart27: +yystart31: switch { default: goto yyabort case c == ' ': - goto yystate28 + goto yystate32 } -yystate28: +yystate32: c = l.next() switch { default: goto yyabort case c == '\n': - goto yystate29 + goto yystate33 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate28 + goto yystate32 } -yystate29: +yystate33: c = l.next() - goto yyrule7 + goto yyrule8 -yystate30: +yystate34: c = l.next() -yystart30: +yystart34: switch { default: goto yyabort + case c == '"': + goto yystate35 case c == ',': - goto yystate31 + goto yystate38 case c == '=': - goto yystate32 + goto yystate39 case c == '}': - goto yystate34 + goto yystate41 case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate33 + goto yystate40 } -yystate31: +yystate35: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate36 + case c == '\\': + goto yystate37 + case c >= '\x01' && c <= '!' 
|| c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate35 + } + +yystate36: c = l.next() goto yyrule13 -yystate32: +yystate37: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate35 + } + +yystate38: c = l.next() - goto yyrule12 + goto yyrule16 -yystate33: +yystate39: + c = l.next() + goto yyrule15 + +yystate40: c = l.next() switch { default: - goto yyrule10 + goto yyrule12 case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate33 + goto yystate40 } -yystate34: +yystate41: c = l.next() - goto yyrule11 + goto yyrule14 -yystate35: +yystate42: c = l.next() -yystart35: +yystart42: switch { default: goto yyabort case c == '"': - goto yystate36 + goto yystate43 } -yystate36: +yystate43: c = l.next() switch { default: goto yyabort case c == '"': - goto yystate37 + goto yystate44 case c == '\\': - goto yystate38 + goto yystate45 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': - goto yystate36 + goto yystate43 } -yystate37: +yystate44: c = l.next() - goto yyrule14 + goto yyrule17 -yystate38: +yystate45: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate36 + goto yystate43 } -yystate39: +yystate46: c = l.next() -yystart39: +yystart46: switch { default: goto yyabort case c == ' ': - goto yystate40 + goto yystate47 case c == '{': - goto yystate42 + goto yystate49 } -yystate40: +yystate47: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate41 + goto yystate48 } -yystate41: +yystate48: c = l.next() switch { default: - goto yyrule15 + goto yyrule18 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate41 + goto yystate48 } -yystate42: +yystate49: c = l.next() - goto yyrule9 + goto yyrule10 -yystate43: +yystate50: c = l.next() -yystart43: +yystart50: switch { default: goto yyabort case c == ' ': - goto yystate45 + goto yystate52 case c == '\n': - goto yystate44 + goto yystate51 } -yystate44: +yystate51: c = l.next() - goto yyrule17 + goto yyrule20 -yystate45: +yystate52: c = l.next() switch { default: goto yyabort case c == '#': - goto yystate47 + goto yystate54 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c == '!' || c == '"' || c >= '$' && c <= 'ÿ': - goto yystate46 + goto yystate53 } -yystate46: +yystate53: c = l.next() switch { default: - goto yyrule16 + goto yyrule19 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate46 + goto yystate53 } -yystate47: +yystate54: c = l.next() switch { default: - goto yyrule16 + goto yyrule19 case c == ' ': - goto yystate48 + goto yystate55 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': - goto yystate46 + goto yystate53 } -yystate48: +yystate55: c = l.next() switch { default: goto yyabort case c == '{': - goto yystate49 + goto yystate56 } -yystate49: +yystate56: c = l.next() - goto yyrule18 + goto yyrule21 -yystate50: +yystate57: c = l.next() -yystart50: +yystart57: switch { default: goto yyabort case c == ',': - goto yystate51 + goto yystate58 case c == '=': - goto yystate52 + goto yystate59 case c == '}': - goto yystate54 + goto yystate61 case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate53 + goto yystate60 } -yystate51: +yystate58: c = l.next() - goto yyrule23 + goto yyrule26 -yystate52: +yystate59: c = l.next() - goto yyrule21 + goto yyrule24 -yystate53: +yystate60: c = l.next() switch { default: - goto yyrule19 + goto yyrule22 case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate53 + goto yystate60 } -yystate54: +yystate61: c = l.next() - goto yyrule20 + goto yyrule23 -yystate55: +yystate62: c = l.next() -yystart55: +yystart62: switch { default: goto yyabort case c == ' ': - goto yystate56 + goto yystate63 case c == '"': - goto yystate58 + goto yystate65 } -yystate56: +yystate63: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate57 + goto yystate64 } -yystate57: +yystate64: c = l.next() switch { default: - goto yyrule24 + goto yyrule27 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate57 + goto yystate64 } -yystate58: +yystate65: c = l.next() switch { default: goto yyabort case c == '"': - goto yystate59 + goto yystate66 case c == '\\': - goto yystate60 + goto yystate67 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': - goto yystate58 + goto yystate65 } -yystate59: +yystate66: c = l.next() - goto yyrule22 + goto yyrule25 -yystate60: +yystate67: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate58 + goto yystate65 } -yystate61: +yystate68: c = l.next() -yystart61: +yystart68: switch { default: goto yyabort case c == ' ': - goto yystate63 + goto yystate70 case c == '\n': - goto yystate62 + goto yystate69 } -yystate62: +yystate69: c = l.next() - goto yyrule26 + goto yyrule29 -yystate63: +yystate70: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate64 + goto yystate71 } -yystate64: +yystate71: c = l.next() switch { default: - goto yyrule25 + goto yyrule28 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate64 + goto yystate71 } yyrule1: // #{S} @@ -626,115 +688,133 @@ yyrule5: // "EOF"\n? 
return tEOFWord goto yystate0 } -yyrule6: // {M}({M}|{D})* +yyrule6: // \"(\\.|[^\\"])*\" { l.state = sMeta2 return tMName goto yystate0 } -yyrule7: // {S}{C}*\n +yyrule7: // {M}({M}|{D})* + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule8: // {S}{C}*\n { l.state = sInit return tText goto yystate0 } -yyrule8: // {M}({M}|{D})* +yyrule9: // {M}({M}|{D})* { l.state = sValue return tMName goto yystate0 } -yyrule9: // \{ +yyrule10: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule11: // \{ { l.state = sLabels return tBraceOpen goto yystate0 } -yyrule10: // {L}({L}|{D})* +yyrule12: // {L}({L}|{D})* { return tLName } -yyrule11: // \} +yyrule13: // \"(\\.|[^\\"])*\" + { + l.state = sLabels + return tQString + goto yystate0 + } +yyrule14: // \} { l.state = sValue return tBraceClose goto yystate0 } -yyrule12: // = +yyrule15: // = { l.state = sLValue return tEqual goto yystate0 } -yyrule13: // , +yyrule16: // , { return tComma } -yyrule14: // \"(\\.|[^\\"\n])*\" +yyrule17: // \"(\\.|[^\\"\n])*\" { l.state = sLabels return tLValue goto yystate0 } -yyrule15: // {S}[^ \n]+ +yyrule18: // {S}[^ \n]+ { l.state = sTimestamp return tValue goto yystate0 } -yyrule16: // {S}[^ \n]+ +yyrule19: // {S}[^ \n]+ { return tTimestamp } -yyrule17: // \n +yyrule20: // \n { l.state = sInit return tLinebreak goto yystate0 } -yyrule18: // {S}#{S}\{ +yyrule21: // {S}#{S}\{ { l.state = sExemplar return tComment goto yystate0 } -yyrule19: // {L}({L}|{D})* +yyrule22: // {L}({L}|{D})* { return tLName } -yyrule20: // \} +yyrule23: // \} { l.state = sEValue return tBraceClose goto yystate0 } -yyrule21: // = +yyrule24: // = { l.state = sEValue return tEqual goto yystate0 } -yyrule22: // \"(\\.|[^\\"\n])*\" +yyrule25: // \"(\\.|[^\\"\n])*\" { l.state = sExemplar return tLValue goto yystate0 } -yyrule23: // , +yyrule26: // , { return tComma } -yyrule24: // {S}[^ \n]+ +yyrule27: // {S}[^ \n]+ { l.state = sETimestamp return tValue goto yystate0 } -yyrule25: // {S}[^ \n]+ +yyrule28: // {S}[^ \n]+ { return tTimestamp } -yyrule26: // \n +yyrule29: // \n if true { // avoid go vet determining the below panic will not be reached l.state = sInit return tLinebreak @@ -743,9 +823,7 @@ yyrule26: // \n panic("unreachable") yyabort: // no lexem recognized - // // silence unused label errors for build and satisfy go vet reachability analysis - // { if false { goto yyabort @@ -757,34 +835,34 @@ yyabort: // no lexem recognized goto yystate1 } if false { - goto yystate5 + goto yystate6 } if false { - goto yystate25 + goto yystate26 } if false { - goto yystate27 + goto yystate31 } if false { - goto yystate30 + goto yystate34 } if false { - goto yystate35 + goto yystate42 } if false { - goto yystate39 + goto yystate46 } if false { - goto yystate43 + goto yystate50 } if false { - goto yystate50 + goto yystate57 } if false { - goto yystate55 + goto yystate62 } if false { - goto yystate61 + goto yystate68 } } diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index e0833636f1..0e82dc9f5c 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -24,6 +24,8 @@ import ( "strings" "unicode/utf8" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -74,11 +76,17 @@ type OpenMetricsParser struct { builder labels.ScratchBuilder series []byte text []byte - mtype MetricType + mtype model.MetricType val float64 
ts int64 hasTS bool start int + // offsets is a list of offsets into series that describe the positions + // of the metric name and label names and values for this series. + // p.offsets[0] is the start character of the metric name. + // p.offsets[1] is the end of the metric name. + // Subsequently, p.offsets is a pair of pair of offsets for the positions + // of the label name and value start and end characters. offsets []int eOffsets []int @@ -86,11 +94,50 @@ type OpenMetricsParser struct { exemplarVal float64 exemplarTs int64 hasExemplarTs bool + + // Created timestamp parsing state. + ct int64 + ctHashSet uint64 + // visitedName is the metric name of the last visited metric when peeking ahead + // for _created series during the execution of the CreatedTimestamp method. + visitedName string + skipCTSeries bool +} + +type openMetricsParserOptions struct { + SkipCTSeries bool +} + +type OpenMetricsOption func(*openMetricsParserOptions) + +// WithOMParserCTSeriesSkipped turns off exposing _created lines +// as series, which makes those only used for parsing created timestamp +// for `CreatedTimestamp` method purposes. +// +// It's recommended to use this option to avoid using _created lines for other +// purposes than created timestamp, but leave false by default for the +// best-effort compatibility. +func WithOMParserCTSeriesSkipped() OpenMetricsOption { + return func(o *openMetricsParserOptions) { + o.SkipCTSeries = true + } } -// NewOpenMetricsParser returns a new parser of the byte slice. -func NewOpenMetricsParser(b []byte) Parser { - return &OpenMetricsParser{l: &openMetricsLexer{b: b}} +// NewOpenMetricsParser returns a new parser for the byte slice with option to skip CT series parsing. +func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser { + options := &openMetricsParserOptions{} + + for _, opt := range opts { + opt(options) + } + + parser := &OpenMetricsParser{ + l: &openMetricsLexer{b: b}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + skipCTSeries: options.SkipCTSeries, + } + + return parser } // Series returns the bytes of the series, the timestamp if set, and the value @@ -126,7 +173,7 @@ func (p *OpenMetricsParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *OpenMetricsParser) Type() ([]byte, MetricType) { +func (p *OpenMetricsParser) Type() ([]byte, model.MetricType) { return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype } @@ -134,7 +181,6 @@ func (p *OpenMetricsParser) Type() ([]byte, MetricType) { // Must only be called after Next returned a unit entry. // The returned byte slices become invalid after the next call to Next. func (p *OpenMetricsParser) Unit() ([]byte, []byte) { - // The Prometheus format does not have units. 
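// A construction sketch using the functional option added above:
//
//	p := NewOpenMetricsParser(b, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
//
// keeps _created lines out of the emitted sample stream, reserving them for
// CreatedTimestamp below.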
return p.l.b[p.offsets[0]:p.offsets[1]], p.text } @@ -152,20 +198,18 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { s := string(p.series) p.builder.Reset() - p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) + metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) + p.builder.Add(labels.MetricName, metricName) - for i := 1; i < len(p.offsets); i += 4 { + for i := 2; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start b := p.offsets[i+1] - p.start + label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := unreplace(s[c:d]) - value := s[c:d] - // Replacer causes allocations. Replace only when necessary. - if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - value = lvalReplacer.Replace(value) - } - p.builder.Add(s[a:b], value) + p.builder.Add(label, value) } p.builder.Sort() @@ -211,6 +255,127 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } +// CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. +// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. +func (p *OpenMetricsParser) CreatedTimestamp() *int64 { + if !typeRequiresCT(p.mtype) { + // Not a CT supported metric type, fast path. + p.ct = 0 + p.visitedName = "" + p.ctHashSet = 0 + return nil + } + + var ( + currLset labels.Labels + buf []byte + peekWithoutNameLsetHash uint64 + ) + p.Metric(&currLset) + currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") + currName := currLset.Get(model.MetricNameLabel) + currName = findBaseMetricName(currName) + + // make sure we're on a new metric before returning + if currName == p.visitedName && currFamilyLsetHash == p.ctHashSet && p.visitedName != "" && p.ctHashSet > 0 && p.ct > 0 { + // CT is already known, fast path. + return &p.ct + } + + // Create a new lexer to reset the parser once this function is done executing. + resetLexer := &openMetricsLexer{ + b: p.l.b, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + p.skipCTSeries = false + + for { + eType, err := p.Next() + if err != nil { + // This means p.Next() will give error too later on, so def no CT line found. + // This might result in partial scrape with wrong/missing CT, but only + // spec improvement would help. + // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + p.resetCTParseValues(resetLexer) + return nil + } + if eType != EntrySeries { + // Assume we hit different family, no CT line found. + p.resetCTParseValues(resetLexer) + return nil + } + + var peekedLset labels.Labels + p.Metric(&peekedLset) + peekedName := peekedLset.Get(model.MetricNameLabel) + if !strings.HasSuffix(peekedName, "_created") { + // Not a CT line, search more. + continue + } + + // We got a CT line here, but let's search if CT line is actually for our series, edge case. + peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") + if peekWithoutNameLsetHash != currFamilyLsetHash { + // Found CT line for a different series, for our series no CT. + p.resetCTParseValues(resetLexer) + return nil + } + + // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. 
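// For example, a foo_created value of 1520872607.123 (seconds, as in the
// tests of this diff) becomes a returned created timestamp of
// 1520872607123 milliseconds.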
+ // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps + ct := int64(p.val * 1000.0) + p.setCTParseValues(ct, currFamilyLsetHash, currName, true, resetLexer) + return &ct + } +} + +// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found. +// This is useful to prevent re-parsing the same series again and early return the CT value. +func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, visitedName string, skipCTSeries bool, resetLexer *openMetricsLexer) { + p.ct = ct + p.l = resetLexer + p.ctHashSet = ctHashSet + p.visitedName = visitedName + p.skipCTSeries = skipCTSeries +} + +// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. +func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) { + p.l = resetLexer + p.ct = 0 + p.ctHashSet = 0 + p.visitedName = "" + p.skipCTSeries = true +} + +// findBaseMetricName returns the metric name without reserved suffixes such as "_created", +// "_sum", etc. based on the OpenMetrics specification found at +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md. +// If no suffix is found, the original name is returned. +func findBaseMetricName(name string) string { + suffixes := []string{"_created", "_count", "_sum", "_bucket", "_total", "_gcount", "_gsum", "_info"} + for _, suffix := range suffixes { + if strings.HasSuffix(name, suffix) { + return strings.TrimSuffix(name, suffix) + } + } + return name +} + +// typeRequiresCT returns true if the metric type requires a _created timestamp. +func typeRequiresCT(t model.MetricType) bool { + switch t { + case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: + return true + default: + return false + } +} + // nextToken returns the next token from the openMetricsLexer. func (p *OpenMetricsParser) nextToken() token { tok := p.l.Lex() @@ -225,8 +390,8 @@ func (p *OpenMetricsParser) parseError(exp string, got token) error { return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) } -// Next advances the parser to the next sample. It returns false if no -// more samples were read or an error occurred. +// Next advances the parser to the next sample. +// It returns (EntryInvalid, io.EOF) if no samples were read. 
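// A typical consumption loop (caller sketch):
//
//	for {
//		et, err := p.Next()
//		if errors.Is(err, io.EOF) {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		if et == EntrySeries {
//			m, ts, v := p.Series()
//			_, _, _ = m, ts, v // handle the sample
//		}
//	}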
func (p *OpenMetricsParser) Next() (Entry, error) { var err error @@ -248,7 +413,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tHelp, tType, tUnit: switch t2 := p.nextToken(); t2 { case tMName: - p.offsets = append(p.offsets, p.l.start, p.l.i) + mStart := p.l.start + mEnd := p.l.i + if p.l.b[mStart] == '"' && p.l.b[mEnd-1] == '"' { + mStart++ + mEnd-- + } + p.offsets = append(p.offsets, mStart, mEnd) default: return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } @@ -266,21 +437,21 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tType: switch s := yoloString(p.text); s { case "counter": - p.mtype = MetricTypeCounter + p.mtype = model.MetricTypeCounter case "gauge": - p.mtype = MetricTypeGauge + p.mtype = model.MetricTypeGauge case "histogram": - p.mtype = MetricTypeHistogram + p.mtype = model.MetricTypeHistogram case "gaugehistogram": - p.mtype = MetricTypeGaugeHistogram + p.mtype = model.MetricTypeGaugeHistogram case "summary": - p.mtype = MetricTypeSummary + p.mtype = model.MetricTypeSummary case "info": - p.mtype = MetricTypeInfo + p.mtype = model.MetricTypeInfo case "stateset": - p.mtype = MetricTypeStateset + p.mtype = model.MetricTypeStateset case "unknown": - p.mtype = MetricTypeUnknown + p.mtype = model.MetricTypeUnknown default: return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } @@ -305,59 +476,46 @@ func (p *OpenMetricsParser) Next() (Entry, error) { return EntryUnit, nil } + case tBraceOpen: + // We found a brace, so make room for the eventual metric name. If these + // values aren't updated, then the metric name was not set inside the + // braces and we can return an error. + if len(p.offsets) == 0 { + p.offsets = []int{-1, -1} + } + if p.offsets, err = p.parseLVals(p.offsets, false); err != nil { + return EntryInvalid, err + } + + p.series = p.l.b[p.start:p.l.i] + if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil { + return EntryInvalid, err + } + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() + } + return EntrySeries, nil case tMName: - p.offsets = append(p.offsets, p.l.i) + p.offsets = append(p.offsets, p.start, p.l.i) p.series = p.l.b[p.start:p.l.i] t2 := p.nextToken() if t2 == tBraceOpen { - p.offsets, err = p.parseLVals(p.offsets) + p.offsets, err = p.parseLVals(p.offsets, false) if err != nil { return EntryInvalid, err } p.series = p.l.b[p.start:p.l.i] t2 = p.nextToken() } - p.val, err = p.getFloatValue(t2, "metric") - if err != nil { + + if err := p.parseSeriesEndOfLine(t2); err != nil { return EntryInvalid, err } - - p.hasTS = false - switch t2 := p.nextToken(); t2 { - case tEOF: - return EntryInvalid, errors.New("data does not end with # EOF") - case tLinebreak: - break - case tComment: - if err := p.parseComment(); err != nil { - return EntryInvalid, err - } - case tTimestamp: - p.hasTS = true - var ts float64 - // A float is enough to hold what we need for millisecond resolution. 
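// The %v -> %w changes in the hunks below wrap rather than flatten the parse
// error, so callers can still inspect the numeric cause, e.g. (error type
// assumed to originate from strconv):
//
//	var ne *strconv.NumError
//	if errors.As(err, &ne) { /* invalid float input */ }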
- if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) - } - if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) - } - p.ts = int64(ts * 1000) - switch t3 := p.nextToken(); t3 { - case tLinebreak: - case tComment: - if err := p.parseComment(); err != nil { - return EntryInvalid, err - } - default: - return EntryInvalid, p.parseError("expected next entry after timestamp", t3) - } - default: - return EntryInvalid, p.parseError("expected timestamp or # symbol", t2) + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() } return EntrySeries, nil - default: err = p.parseError("expected a valid start token", t) } @@ -367,7 +525,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { func (p *OpenMetricsParser) parseComment() error { var err error // Parse the labels. - p.eOffsets, err = p.parseLVals(p.eOffsets) + p.eOffsets, err = p.parseLVals(p.eOffsets, true) if err != nil { return err } @@ -391,7 +549,7 @@ func (p *OpenMetricsParser) parseComment() error { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { return fmt.Errorf("invalid exemplar timestamp %f", ts) @@ -408,38 +566,47 @@ func (p *OpenMetricsParser) parseComment() error { return nil } -func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { - first := true +func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, error) { + t := p.nextToken() for { - t := p.nextToken() + curTStart := p.l.start + curTI := p.l.i switch t { case tBraceClose: return offsets, nil - case tComma: - if first { - return nil, p.parseError("expected label name or left brace", t) - } - t = p.nextToken() - if t != tLName { + case tLName: + case tQString: + default: + return nil, p.parseError("expected label name", t) + } + + t = p.nextToken() + // A quoted string followed by a comma or brace is a metric name. Set the + // offsets and continue processing. If this is an exemplar, this format + // is not allowed. + if t == tComma || t == tBraceClose { + if isExemplar { return nil, p.parseError("expected label name", t) } - case tLName: - if !first { - return nil, p.parseError("expected comma", t) + if offsets[0] != -1 || offsets[1] != -1 { + return nil, fmt.Errorf("metric name already set while parsing: %q", p.l.b[p.start:p.l.i]) } - default: - if first { - return nil, p.parseError("expected label name or left brace", t) + offsets[0] = curTStart + 1 + offsets[1] = curTI - 1 + if t == tBraceClose { + return offsets, nil } - return nil, p.parseError("expected comma or left brace", t) - + t = p.nextToken() + continue } - first = false - // t is now a label name. - - offsets = append(offsets, p.l.start, p.l.i) + // We have a label name, and it might be quoted. + if p.l.b[curTStart] == '"' { + curTStart++ + curTI-- + } + offsets = append(offsets, curTStart, curTI) - if t := p.nextToken(); t != tEqual { + if t != tEqual { return nil, p.parseError("expected equal", t) } if t := p.nextToken(); t != tLValue { @@ -452,16 +619,84 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { // The openMetricsLexer ensures the value string is quoted. Strip first // and last character. 
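// With the quoted-string support added above, metric names and label names
// inside the braces may themselves be quoted, so a line such as
//
//	{"metric.with.dots","label.with.dots"="value"} 1
//
// now parses: a quoted string followed by a comma or closing brace is taken
// as the metric name, and quoted label names have their quotes stripped
// before being added to the builder.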
offsets = append(offsets, p.l.start+1, p.l.i-1) + + // Free trailing commas are allowed. + t = p.nextToken() + if t == tComma { + t = p.nextToken() + } else if t != tBraceClose { + return nil, p.parseError("expected comma or brace close", t) + } } } +// isCreatedSeries returns true if the current series is a _created series. +func (p *OpenMetricsParser) isCreatedSeries() bool { + var newLbs labels.Labels + p.Metric(&newLbs) + name := newLbs.Get(model.MetricNameLabel) + if typeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { + return true + } + return false +} + +// parseSeriesEndOfLine parses the series end of the line (value, optional +// timestamp, commentary, etc.) after the metric name and labels. +// It starts parsing with the provided token. +func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error { + if p.offsets[0] == -1 { + return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + } + + var err error + p.val, err = p.getFloatValue(t, "metric") + if err != nil { + return err + } + + p.hasTS = false + switch t2 := p.nextToken(); t2 { + case tEOF: + return errors.New("data does not end with # EOF") + case tLinebreak: + break + case tComment: + if err := p.parseComment(); err != nil { + return err + } + case tTimestamp: + p.hasTS = true + var ts float64 + // A float is enough to hold what we need for millisecond resolution. + if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + } + if math.IsNaN(ts) || math.IsInf(ts, 0) { + return fmt.Errorf("invalid timestamp %f", ts) + } + p.ts = int64(ts * 1000) + switch t3 := p.nextToken(); t3 { + case tLinebreak: + case tComment: + if err := p.parseComment(); err != nil { + return err + } + default: + return p.parseError("expected next entry after timestamp", t3) + } + } + + return nil +} + func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { if t != tValue { return 0, p.parseError(fmt.Sprintf("expected value after %v", after), t) } val, err := parseFloat(yoloString(p.l.buf()[1:])) if err != nil { - return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return 0, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. if math.IsNaN(p.exemplarVal) { diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index d65e4977ef..bbb7c07306 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -16,14 +16,18 @@ package textparse import ( "errors" "io" + "os" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" ) +func int64p(x int64) *int64 { return &x } + func TestOpenMetricsParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
# TYPE go_gc_duration_seconds summary @@ -63,32 +67,62 @@ ss{A="a"} 0 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 testmetric{label="\"bar\""} 1 +# HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter -foo_total 17.0 1520879607.789 # {id="counter-test"} 5` +foo_total 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created 1520872607.123 +foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created{a="b"} 1520872607.123 +# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far +# TYPE bar summary +bar_count 17.0 +bar_sum 324789.3 +bar{quantile="0.95"} 123.7 +bar{quantile="0.99"} 150.0 +bar_created 1520872608.124 +# HELP baz Histogram with the same objective as above's summary +# TYPE baz histogram +baz_bucket{le="0.0"} 0 +baz_bucket{le="+Inf"} 17 +baz_count 17 +baz_sum 324789.3 +baz_created 1520872609.125 +# HELP fizz_created Gauge which shouldn't be parsed as CT +# TYPE fizz_created gauge +fizz_created 17.0 +# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 1 +something_bucket{le="+Inf"} 18 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 20 +yum_sum 324789.5 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_count 21 +foobar_created 1520430004 +foobar_sum 324789.6 +foobar{quantile="0.95"} 123.8 +foobar{quantile="0.99"} 150.1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\n# EOF\n" - int64p := func(x int64) *int64 { return &x } - - exp := []struct { - lset labels.Labels - m string - t *int64 - v float64 - typ MetricType - help string - unit string - comment string - e *exemplar.Exemplar - }{ + exp := []expectedParse{ { m: "go_gc_duration_seconds", help: "A summary of the GC invocation durations.", }, { m: "go_gc_duration_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: "go_gc_duration_seconds", unit: "seconds", @@ -130,7 +164,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` help: "Number of goroutines that currently exist.", }, { m: "go_goroutines", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { m: `go_goroutines`, v: 33, @@ -138,21 +172,21 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("__name__", "go_goroutines"), }, { m: "hh", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: `hh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"), }, { m: "gh", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: `gh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"), }, { m: "hhh", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: `hhh_bucket{le="+Inf"}`, v: 1, @@ -165,7 +199,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, }, { m: "ggh", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: `ggh_bucket{le="+Inf"}`, v: 1, @@ -178,7 +212,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` e: &exemplar.Exemplar{Labels: labels.FromStrings("id", 
"gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, }, { m: "smr_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: `smr_seconds_count`, v: 2, @@ -191,14 +225,14 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, }, { m: "ii", - typ: MetricTypeInfo, + typ: model.MetricTypeInfo, }, { m: `ii{foo="bar"}`, v: 1, lset: labels.FromStrings("__name__", "ii", "foo", "bar"), }, { m: "ss", - typ: MetricTypeStateset, + typ: model.MetricTypeStateset, }, { m: `ss{ss="foo"}`, v: 1, @@ -213,7 +247,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("A", "a", "__name__", "ss"), }, { m: "un", - typ: MetricTypeUnknown, + typ: model.MetricTypeUnknown, }, { m: "_metric_starting_with_underscore", v: 1, @@ -226,15 +260,166 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` m: "testmetric{label=\"\\\"bar\\\"\"}", v: 1, lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: "foo", + help: "Counter with and without labels to certify CT is parsed for both cases", }, { m: "foo", - typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { m: "foo_total", v: 17, lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1520872607123), + }, { + m: `foo_total{a="b"}`, + v: 17.0, + lset: labels.FromStrings("__name__", "foo_total", "a", "b"), + t: int64p(1520879607789), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1520872607123), + }, { + m: "bar", + help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", + }, { + m: "bar", + typ: model.MetricTypeSummary, + }, { + m: "bar_count", + v: 17.0, + lset: labels.FromStrings("__name__", "bar_count"), + ct: int64p(1520872608124), + }, { + m: "bar_sum", + v: 324789.3, + lset: labels.FromStrings("__name__", "bar_sum"), + ct: int64p(1520872608124), + }, { + m: `bar{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), + ct: int64p(1520872608124), + }, { + m: `bar{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), + ct: int64p(1520872608124), + }, { + m: "baz", + help: "Histogram with the same objective as above's summary", + }, { + m: "baz", + typ: model.MetricTypeHistogram, + }, { + m: `baz_bucket{le="0.0"}`, + v: 0, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), + ct: int64p(1520872609125), + }, { + m: `baz_bucket{le="+Inf"}`, + v: 17, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), + ct: int64p(1520872609125), + }, { + m: `baz_count`, + v: 17, + lset: labels.FromStrings("__name__", "baz_count"), + ct: int64p(1520872609125), + }, { + m: `baz_sum`, + v: 324789.3, + lset: labels.FromStrings("__name__", "baz_sum"), + ct: int64p(1520872609125), + }, { + m: "fizz_created", + help: "Gauge which shouldn't be parsed as CT", + }, { + m: "fizz_created", + typ: model.MetricTypeGauge, + }, { + m: `fizz_created`, + v: 17, + lset: labels.FromStrings("__name__", "fizz_created"), + }, { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something_count`, + v: 18, + lset: labels.FromStrings("__name__", "something_count"), + ct: 
int64p(1520430001000), + }, { + m: `something_sum`, + v: 324789.4, + lset: labels.FromStrings("__name__", "something_sum"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="0.0"}`, + v: 1, + lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"), + ct: int64p(1520430001000), + }, { + m: `something_bucket{le="+Inf"}`, + v: 18, + lset: labels.FromStrings("__name__", "something_bucket", "le", "+Inf"), + ct: int64p(1520430001000), + }, { + m: "yum", + help: "Summary with _created between sum and quantiles", + }, { + m: "yum", + typ: model.MetricTypeSummary, + }, { + m: `yum_count`, + v: 20, + lset: labels.FromStrings("__name__", "yum_count"), + ct: int64p(1520430003000), + }, { + m: `yum_sum`, + v: 324789.5, + lset: labels.FromStrings("__name__", "yum_sum"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"), + ct: int64p(1520430003000), + }, { + m: `yum{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"), + ct: int64p(1520430003000), + }, { + m: "foobar", + help: "Summary with _created as the first line", + }, { + m: "foobar", + typ: model.MetricTypeSummary, + }, { + m: `foobar_count`, + v: 21, + lset: labels.FromStrings("__name__", "foobar_count"), + ct: int64p(1520430004000), + }, { + m: `foobar_sum`, + v: 324789.6, + lset: labels.FromStrings("__name__", "foobar_sum"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.95"}`, + v: 123.8, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"), + ct: int64p(1520430004000), + }, { + m: `foobar{quantile="0.99"}`, + v: 150.1, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"), + ct: int64p(1520430004000), }, { m: "metric", help: "foo\x00bar", @@ -245,58 +430,82 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` }, } - p := NewOpenMetricsParser([]byte(input)) - i := 0 - - var res labels.Labels - - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - m, ts, v := p.Series() - - var e exemplar.Exemplar - p.Metric(&res) - found := p.Exemplar(&e) - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].t, ts) - require.Equal(t, exp[i].v, v) - require.Equal(t, exp[i].lset, res) - if exp[i].e == nil { - require.Equal(t, false, found) - } else { - require.Equal(t, true, found) - require.Equal(t, *exp[i].e, e) - } - - case EntryType: - m, typ := p.Type() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].typ, typ) + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + checkParseResultsWithCT(t, p, exp, true) +} - case EntryHelp: - m, h := p.Help() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].help, string(h)) +func TestUTF8OpenMetricsParse(t *testing.T) { + oldValidationScheme := model.NameValidationScheme + model.NameValidationScheme = model.UTF8Validation + defer func() { + model.NameValidationScheme = oldValidationScheme + }() - case EntryUnit: - m, u := p.Unit() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].unit, string(u)) + input := `# HELP "go.gc_duration_seconds" A summary of the GC invocation durations. 
+# TYPE "go.gc_duration_seconds" summary +# UNIT "go.gc_duration_seconds" seconds +{"go.gc_duration_seconds",quantile="0"} 4.9351e-05 +{"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 +{"go.gc_duration_seconds_created"} 1520872607.123 +{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 +{"http.status",q="0.9",a="b"} 8.3835e-05 +{"http.status",q="0.9",a="b"} 8.3835e-05 +{q="0.9","http.status",a="b"} 8.3835e-05 +{"go.gc_duration_seconds_sum"} 0.004304266 +{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0` - case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment())) - } + input += "\n# EOF\n" - i++ + exp := []expectedParse{ + { + m: "go.gc_duration_seconds", + help: "A summary of the GC invocation durations.", + }, { + m: "go.gc_duration_seconds", + typ: model.MetricTypeSummary, + }, { + m: "go.gc_duration_seconds", + unit: "seconds", + }, { + m: `{"go.gc_duration_seconds",quantile="0"}`, + v: 4.9351e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + ct: int64p(1520872607123), + }, { + m: `{"go.gc_duration_seconds",quantile="0.25"}`, + v: 7.424100000000001e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), + ct: int64p(1520872607123), + }, { + m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.5", "a", "b"), + }, { + m: `{"http.status",q="0.9",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "http.status", "q", "0.9", "a", "b"), + }, { + m: `{"http.status",q="0.9",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "http.status", "q", "0.9", "a", "b"), + }, { + m: `{q="0.9","http.status",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "http.status", "q", "0.9", "a", "b"), + }, { + m: `{"go.gc_duration_seconds_sum"}`, + v: 0.004304266, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds_sum"), + }, { + m: `{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"}`, + v: 10.0, + lset: labels.FromStrings("__name__", `Heizölrückstoßabdämpfung 10€ metric with "interesting" {character +choices}`, "strange©™\n'quoted' \"name\"", "6"), + }, } - require.Equal(t, len(exp), i) + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + checkParseResultsWithCT(t, p, exp, true) } func TestOpenMetricsParseErrors(t *testing.T) { @@ -455,17 +664,13 @@ func TestOpenMetricsParseErrors(t *testing.T) { input: "a{b='c'} 1\n# EOF\n", err: "expected label value, got \"'\" (\"INVALID\") while parsing: \"a{b='\"", }, - { - input: "a{b=\"c\",} 1\n# EOF\n", - err: "expected label name, got \"} \" (\"BCLOSE\") while parsing: \"a{b=\\\"c\\\",} \"", - }, { input: "a{,b=\"c\"} 1\n# EOF\n", - err: "expected label name or left brace, got \",b\" (\"COMMA\") while parsing: \"a{,b\"", + err: "expected label name, got \",b\" (\"COMMA\") while parsing: \"a{,b\"", }, { input: "a{b=\"c\"d=\"e\"} 1\n# EOF\n", - err: "expected comma, got \"d=\" (\"LNAME\") while parsing: \"a{b=\\\"c\\\"d=\"", + err: "expected comma or brace close, got \"d=\" (\"LNAME\") while parsing: \"a{b=\\\"c\\\"d=\"", }, { input: "a{b=\"c\",,d=\"e\"} 1\n# EOF\n", @@ -477,12 +682,24 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "a{\xff=\"foo\"} 1\n# EOF\n", - err: "expected label name or left brace, got 
\"\\xff\" (\"INVALID\") while parsing: \"a{\\xff\"", + err: "expected label name, got \"\\xff\" (\"INVALID\") while parsing: \"a{\\xff\"", }, { input: "a{b=\"\xff\"} 1\n# EOF\n", err: "invalid UTF-8 label value: \"\\\"\\xff\\\"\"", }, + { + input: `{"a","b = "c"} +# EOF +`, + err: "expected equal, got \"c\\\"\" (\"LNAME\") while parsing: \"{\\\"a\\\",\\\"b = \\\"c\\\"\"", + }, + { + input: `{"a",b\nc="d"} 1 +# EOF +`, + err: "expected equal, got \"\\\\\" (\"INVALID\") while parsing: \"{\\\"a\\\",b\\\\\"", + }, { input: "a true\n", err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"a true\"", @@ -493,7 +710,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "empty_label_name{=\"\"} 0\n# EOF\n", - err: "expected label name or left brace, got \"=\\\"\" (\"EQUAL\") while parsing: \"empty_label_name{=\\\"\"", + err: "expected label name, got \"=\\\"\" (\"EQUAL\") while parsing: \"empty_label_name{=\\\"\"", }, { input: "foo 1_2\n\n# EOF\n", @@ -523,6 +740,14 @@ func TestOpenMetricsParseErrors(t *testing.T) { input: `custom_metric_total 1 # {aa="bb"}`, err: "expected value after exemplar labels, got \"}\" (\"EOF\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"}\"", }, + { + input: `custom_metric_total 1 # {bb}`, + err: "expected label name, got \"}\" (\"BCLOSE\") while parsing: \"custom_metric_total 1 # {bb}\"", + }, + { + input: `custom_metric_total 1 # {bb, a="dd"}`, + err: "expected label name, got \", \" (\"COMMA\") while parsing: \"custom_metric_total 1 # {bb, \"", + }, { input: `custom_metric_total 1 # {aa="bb",,cc="dd"} 1`, err: "expected label name, got \",c\" (\"COMMA\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\",,c\"", @@ -549,7 +774,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: `{b="c",} 1`, - err: "expected a valid start token, got \"{\" (\"INVALID\") while parsing: \"{\"", + err: "metric name not set while parsing: \"{b=\\\"c\\\",} 1\"", }, { input: `a 1 NaN`, @@ -571,14 +796,10 @@ func TestOpenMetricsParseErrors(t *testing.T) { input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf", err: `invalid exemplar timestamp -Inf`, }, - { - input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf", - err: `invalid exemplar timestamp +Inf`, - }, } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input)) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -643,7 +864,7 @@ func TestOMNullByteHandling(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input)) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -657,3 +878,126 @@ func TestOMNullByteHandling(t *testing.T) { require.Equal(t, c.err, err.Error(), "test %d", i) } } + +// While not desirable, there are cases were CT fails to parse and +// these tests show them. +// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. 
+func TestCTParseFailures(t *testing.T) { + input := `# HELP thing Histogram with _created as first line +# TYPE thing histogram +thing_created 1520872607.123 +thing_count 17 +thing_sum 324789.3 +thing_bucket{le="0.0"} 0 +thing_bucket{le="+Inf"} 17` + + input += "\n# EOF\n" + + int64p := func(x int64) *int64 { return &x } + + type expectCT struct { + m string + ct *int64 + typ model.MetricType + help string + isErr bool + } + + exp := []expectCT{ + { + m: "thing", + help: "Histogram with _created as first line", + isErr: false, + }, { + m: "thing", + typ: model.MetricTypeHistogram, + isErr: false, + }, { + m: `thing_count`, + ct: int64p(1520872607123), + isErr: true, + }, { + m: `thing_sum`, + ct: int64p(1520872607123), + isErr: true, + }, { + m: `thing_bucket{le="0.0"}`, + ct: int64p(1520872607123), + isErr: true, + }, { + m: `thing_bucket{le="+Inf"}`, + ct: int64p(1520872607123), + isErr: true, + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + i := 0 + + var res labels.Labels + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + switch et { + case EntrySeries: + p.Metric(&res) + + if ct := p.CreatedTimestamp(); exp[i].isErr { + require.Nil(t, ct) + } else { + require.Equal(t, *exp[i].ct, *ct) + } + default: + i++ + continue + } + i++ + } +} + +func BenchmarkOMParseCreatedTimestamp(b *testing.B) { + for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ + "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st) + }, + "openmetrics-skip-ct": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + }, + } { + f, err := os.Open("omtestdata.txt") + require.NoError(b, err) + defer f.Close() + + buf, err := io.ReadAll(f) + require.NoError(b, err) + + b.Run(parserName+"/parse-ct/"+"omtestdata.txt", func(b *testing.B) { + b.SetBytes(int64(len(buf) / promtestdataSampleCount)) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i += promtestdataSampleCount { + p := parser(buf, st) + + Outer: + for i < b.N { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Outer + } + b.Fatal(err) + case EntrySeries: + p.CreatedTimestamp() + } + } + } + }) + } +} diff --git a/model/textparse/promlex.l b/model/textparse/promlex.l index c1bc8e7766..e9fa1fb71c 100644 --- a/model/textparse/promlex.l +++ b/model/textparse/promlex.l @@ -66,12 +66,15 @@ C [^\n] # return l.consumeComment() HELP[\t ]+ l.state = sMeta1; return tHelp TYPE[\t ]+ l.state = sMeta1; return tType +\"(\\.|[^\\"])*\" l.state = sMeta2; return tMName {M}({M}|{D})* l.state = sMeta2; return tMName {C}* l.state = sInit; return tText {M}({M}|{D})* l.state = sValue; return tMName \{ l.state = sLabels; return tBraceOpen +\{ l.state = sLabels; return tBraceOpen {L}({L}|{D})* return tLName +\"(\\.|[^\\"])*\" l.state = sLabels; return tQString \} l.state = sValue; return tBraceClose = l.state = sLValue; return tEqual , return tComma diff --git a/model/textparse/promlex.l.go b/model/textparse/promlex.l.go index 4076aae610..a083e5549b 100644 --- a/model/textparse/promlex.l.go +++ b/model/textparse/promlex.l.go @@ -51,19 +51,19 @@ yystate0: case 0: // start condition: INITIAL goto yystart1 case 1: // start condition: sComment - goto yystart8 + goto yystart9 case 2: // start condition: sMeta1 - goto yystart19 + goto yystart20 case 3: // start 
condition: sMeta2 - goto yystart21 + goto yystart25 case 4: // start condition: sLabels - goto yystart24 + goto yystart28 case 5: // start condition: sLValue - goto yystart29 + goto yystart36 case 6: // start condition: sValue - goto yystart33 + goto yystart40 case 7: // start condition: sTimestamp - goto yystart36 + goto yystart43 } yystate1: @@ -82,6 +82,8 @@ yystart1: goto yystate3 case c == '\x00': goto yystate2 + case c == '{': + goto yystate8 } yystate2: @@ -123,297 +125,357 @@ yystate7: c = l.next() switch { default: - goto yyrule10 + goto yyrule11 case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': goto yystate7 } yystate8: c = l.next() -yystart8: + goto yyrule13 + +yystate9: + c = l.next() +yystart9: switch { default: goto yyabort case c == 'H': - goto yystate9 + goto yystate10 case c == 'T': - goto yystate14 + goto yystate15 case c == '\t' || c == ' ': goto yystate3 } -yystate9: +yystate10: c = l.next() switch { default: goto yyabort case c == 'E': - goto yystate10 + goto yystate11 } -yystate10: +yystate11: c = l.next() switch { default: goto yyabort case c == 'L': - goto yystate11 + goto yystate12 } -yystate11: +yystate12: c = l.next() switch { default: goto yyabort case c == 'P': - goto yystate12 + goto yystate13 } -yystate12: +yystate13: c = l.next() switch { default: goto yyabort case c == '\t' || c == ' ': - goto yystate13 + goto yystate14 } -yystate13: +yystate14: c = l.next() switch { default: goto yyrule6 case c == '\t' || c == ' ': - goto yystate13 + goto yystate14 } -yystate14: +yystate15: c = l.next() switch { default: goto yyabort case c == 'Y': - goto yystate15 + goto yystate16 } -yystate15: +yystate16: c = l.next() switch { default: goto yyabort case c == 'P': - goto yystate16 + goto yystate17 } -yystate16: +yystate17: c = l.next() switch { default: goto yyabort case c == 'E': - goto yystate17 + goto yystate18 } -yystate17: +yystate18: c = l.next() switch { default: goto yyabort case c == '\t' || c == ' ': - goto yystate18 + goto yystate19 } -yystate18: +yystate19: c = l.next() switch { default: goto yyrule7 case c == '\t' || c == ' ': - goto yystate18 + goto yystate19 } -yystate19: +yystate20: c = l.next() -yystart19: +yystart20: switch { default: goto yyabort + case c == '"': + goto yystate21 case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate20 + goto yystate24 case c == '\t' || c == ' ': goto yystate3 } -yystate20: +yystate21: c = l.next() switch { default: - goto yyrule8 - case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate20 + goto yyabort + case c == '"': + goto yystate22 + case c == '\\': + goto yystate23 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate21 } -yystate21: +yystate22: + c = l.next() + goto yyrule8 + +yystate23: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate21 + } + +yystate24: c = l.next() -yystart21: switch { default: goto yyrule9 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate24 + } + +yystate25: + c = l.next() +yystart25: + switch { + default: + goto yyrule10 case c == '\t' || c == ' ': - goto yystate23 + goto yystate27 case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': - goto yystate22 + goto yystate26 } -yystate22: +yystate26: c = l.next() switch { default: - goto yyrule9 + goto yyrule10 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate22 + goto yystate26 } -yystate23: +yystate27: c = l.next() switch { default: goto yyrule3 case c == '\t' || c == ' ': - goto yystate23 + goto yystate27 case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate22 + goto yystate26 } -yystate24: +yystate28: c = l.next() -yystart24: +yystart28: switch { default: goto yyabort + case c == '"': + goto yystate29 case c == ',': - goto yystate25 + goto yystate32 case c == '=': - goto yystate26 + goto yystate33 case c == '\t' || c == ' ': goto yystate3 case c == '}': - goto yystate28 + goto yystate35 case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate27 + goto yystate34 } -yystate25: +yystate29: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate30 + case c == '\\': + goto yystate31 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate29 + } + +yystate30: c = l.next() goto yyrule15 -yystate26: +yystate31: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate29 + } + +yystate32: c = l.next() - goto yyrule14 + goto yyrule18 -yystate27: +yystate33: + c = l.next() + goto yyrule17 + +yystate34: c = l.next() switch { default: - goto yyrule12 + goto yyrule14 case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate27 + goto yystate34 } -yystate28: +yystate35: c = l.next() - goto yyrule13 + goto yyrule16 -yystate29: +yystate36: c = l.next() -yystart29: +yystart36: switch { default: goto yyabort case c == '"': - goto yystate30 + goto yystate37 case c == '\t' || c == ' ': goto yystate3 } -yystate30: +yystate37: c = l.next() switch { default: goto yyabort case c == '"': - goto yystate31 + goto yystate38 case c == '\\': - goto yystate32 + goto yystate39 case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': - goto yystate30 + goto yystate37 } -yystate31: +yystate38: c = l.next() - goto yyrule16 + goto yyrule19 -yystate32: +yystate39: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate30 + goto yystate37 } -yystate33: +yystate40: c = l.next() -yystart33: +yystart40: switch { default: goto yyabort case c == '\t' || c == ' ': goto yystate3 case c == '{': - goto yystate35 + goto yystate42 case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ': - goto yystate34 + goto yystate41 } -yystate34: +yystate41: c = l.next() switch { default: - goto yyrule17 + goto yyrule20 case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'z' || c >= '|' && c <= 'ÿ': - goto yystate34 + goto yystate41 } -yystate35: +yystate42: c = l.next() - goto yyrule11 + goto yyrule12 -yystate36: +yystate43: c = l.next() -yystart36: +yystart43: switch { default: goto yyabort case c == '\n': - goto yystate37 + goto yystate44 case c == '\t' || c == ' ': goto yystate3 case c >= '0' && c <= '9': - goto yystate38 + goto yystate45 } -yystate37: +yystate44: c = l.next() - goto yyrule19 + goto yyrule22 -yystate38: +yystate45: c = l.next() switch { default: - goto yyrule18 + goto yyrule21 case c >= '0' && c <= '9': - goto yystate38 + goto yystate45 } yyrule1: // \0 @@ -451,67 +513,85 @@ yyrule7: // TYPE[\t ]+ return tType goto yystate0 } -yyrule8: // {M}({M}|{D})* +yyrule8: // \"(\\.|[^\\"])*\" + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule9: // {M}({M}|{D})* { l.state = sMeta2 return tMName goto yystate0 } -yyrule9: // {C}* +yyrule10: // {C}* { l.state = sInit return tText goto yystate0 } -yyrule10: // {M}({M}|{D})* +yyrule11: // {M}({M}|{D})* { l.state = sValue return tMName goto yystate0 } -yyrule11: // \{ +yyrule12: // \{ { l.state = sLabels return tBraceOpen goto yystate0 } -yyrule12: // {L}({L}|{D})* +yyrule13: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule14: // {L}({L}|{D})* { return tLName } -yyrule13: // \} +yyrule15: // \"(\\.|[^\\"])*\" + { + l.state = sLabels + return tQString + goto yystate0 + } +yyrule16: // \} { l.state = sValue return tBraceClose goto yystate0 } -yyrule14: // = +yyrule17: // = { l.state = sLValue return tEqual goto yystate0 } -yyrule15: // , +yyrule18: // , { return tComma } -yyrule16: // \"(\\.|[^\\"])*\" +yyrule19: // \"(\\.|[^\\"])*\" { l.state = sLabels return tLValue goto yystate0 } -yyrule17: // [^{ \t\n]+ +yyrule20: // [^{ \t\n]+ { l.state = sTimestamp return tValue goto yystate0 } -yyrule18: // {D}+ +yyrule21: // {D}+ { return tTimestamp } -yyrule19: // \n +yyrule22: // \n if true { // avoid go vet determining the below panic will not be reached l.state = sInit return tLinebreak @@ -520,9 +600,7 @@ yyrule19: // \n panic("unreachable") yyabort: // no lexem recognized - // // silence unused label errors for build and satisfy go vet reachability analysis - // { if false { goto yyabort @@ -534,25 +612,25 @@ yyabort: // no lexem recognized goto yystate1 } if false { - goto yystate8 + goto yystate9 } if false { - goto yystate19 + goto yystate20 } if false { - goto yystate21 + goto yystate25 } if false { - goto yystate24 + goto yystate28 } if false { - goto yystate29 + goto yystate36 } if false { - goto yystate33 + goto yystate40 } if false { - goto yystate36 + goto yystate43 } } diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 94338a6660..5759769279 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -26,6 +26,8 @@ import ( "unicode/utf8" "unsafe" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -55,6 +57,7 @@ const ( tComment tBlank tMName + tQString tBraceOpen tBraceClose tLName @@ -91,6 +94,8 @@ func (t token) String() string { return "BLANK" case tMName: return "MNAME" + case tQString: + return "QSTRING" case tBraceOpen: return "BOPEN" case tBraceClose: @@ -146,17 +151,26 @@ type PromParser struct { builder labels.ScratchBuilder series []byte text []byte - mtype MetricType + mtype model.MetricType val float64 ts int64 hasTS bool start int + // offsets 
is a list of offsets into series that describe the positions + // of the metric name and label names and values for this series. + // p.offsets[0] is the start character of the metric name. + // p.offsets[1] is the end of the metric name. + // Subsequently, p.offsets holds pairs of offsets for the positions + // of the label name and value start and end characters. offsets []int } // NewPromParser returns a new parser of the byte slice. -func NewPromParser(b []byte) Parser { - return &PromParser{l: &promlexer{b: append(b, '\n')}} +func NewPromParser(b []byte, st *labels.SymbolTable) Parser { + return &PromParser{ + l: &promlexer{b: append(b, '\n')}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + } } // Series returns the bytes of the series, the timestamp if set, and the value @@ -190,7 +204,7 @@ func (p *PromParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *PromParser) Type() ([]byte, MetricType) { +func (p *PromParser) Type() ([]byte, model.MetricType) { return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype } @@ -216,20 +230,17 @@ func (p *PromParser) Metric(l *labels.Labels) string { s := string(p.series) p.builder.Reset() - p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) + metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) + p.builder.Add(labels.MetricName, metricName) - for i := 1; i < len(p.offsets); i += 4 { + for i := 2; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start b := p.offsets[i+1] - p.start + label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - - value := s[c:d] - // Replacer causes allocations. Replace only when necessary. - if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - value = lvalReplacer.Replace(value) - } - p.builder.Add(s[a:b], value) + value := unreplace(s[c:d]) + p.builder.Add(label, value) } p.builder.Sort() @@ -245,6 +256,12 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *PromParser) CreatedTimestamp() *int64 { + return nil +} + // nextToken returns the next token from the promlexer. It skips over tabs // and spaces. func (p *PromParser) nextToken() token { @@ -263,8 +280,8 @@ func (p *PromParser) parseError(exp string, got token) error { return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) } -// Next advances the parser to the next sample. It returns false if no -// more samples were read or an error occurred. +// Next advances the parser to the next sample. +// It returns (EntryInvalid, io.EOF) if no samples were read.
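
Before the reworked `Next` that follows, a hand-worked illustration of the offsets convention documented above. The series text and index values here are hypothetical and computed by hand purely for illustration; in the parser they come from the lexer:

```go
package main

import "fmt"

func main() {
	// Hypothetical series text; start == 0 for simplicity. The parser stores
	// absolute positions into its buffer and subtracts p.start when slicing.
	series := `http_requests_total{code="200"}`

	// offsets[0] and offsets[1] bound the metric name; each following group
	// of four bounds one label name and its (quote-stripped) value.
	offsets := []int{0, 19, 20, 24, 26, 29}

	fmt.Println("name:", series[offsets[0]:offsets[1]]) // http_requests_total
	for i := 2; i+3 < len(offsets); i += 4 {
		name := series[offsets[i]:offsets[i+1]]
		value := series[offsets[i+2]:offsets[i+3]]
		fmt.Printf("label: %s=%q\n", name, value) // label: code="200"
	}
}
```

`Next` and `parseLVals` below fill exactly these slots as they scan.
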
func (p *PromParser) Next() (Entry, error) { var err error @@ -281,7 +298,13 @@ func (p *PromParser) Next() (Entry, error) { case tHelp, tType: switch t2 := p.nextToken(); t2 { case tMName: - p.offsets = append(p.offsets, p.l.start, p.l.i) + mStart := p.l.start + mEnd := p.l.i + if p.l.b[mStart] == '"' && p.l.b[mEnd-1] == '"' { + mStart++ + mEnd-- + } + p.offsets = append(p.offsets, mStart, mEnd) default: return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } @@ -293,21 +316,21 @@ func (p *PromParser) Next() (Entry, error) { p.text = []byte{} } default: - return EntryInvalid, fmt.Errorf("expected text in %s", t.String()) + return EntryInvalid, fmt.Errorf("expected text in %s, got %v", t.String(), t2.String()) } switch t { case tType: switch s := yoloString(p.text); s { case "counter": - p.mtype = MetricTypeCounter + p.mtype = model.MetricTypeCounter case "gauge": - p.mtype = MetricTypeGauge + p.mtype = model.MetricTypeGauge case "histogram": - p.mtype = MetricTypeHistogram + p.mtype = model.MetricTypeHistogram case "summary": - p.mtype = MetricTypeSummary + p.mtype = model.MetricTypeSummary case "untyped": - p.mtype = MetricTypeUnknown + p.mtype = model.MetricTypeUnknown default: return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } @@ -331,12 +354,24 @@ func (p *PromParser) Next() (Entry, error) { return EntryInvalid, p.parseError("linebreak expected after comment", t) } return EntryComment, nil + case tBraceOpen: + // We found a brace, so make room for the eventual metric name. If these + // values aren't updated, then the metric name was not set inside the + // braces and we can return an error. + if len(p.offsets) == 0 { + p.offsets = []int{-1, -1} + } + if err := p.parseLVals(); err != nil { + return EntryInvalid, err + } + p.series = p.l.b[p.start:p.l.i] + return p.parseMetricSuffix(p.nextToken()) case tMName: - p.offsets = append(p.offsets, p.l.i) + p.offsets = append(p.offsets, p.start, p.l.i) p.series = p.l.b[p.start:p.l.i] - t2 := p.nextToken() + // If there's a brace, consume and parse the label values. if t2 == tBraceOpen { if err := p.parseLVals(); err != nil { return EntryInvalid, err @@ -344,32 +379,7 @@ func (p *PromParser) Next() (Entry, error) { p.series = p.l.b[p.start:p.l.i] t2 = p.nextToken() } - if t2 != tValue { - return EntryInvalid, p.parseError("expected value after metric", t2) - } - if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) - } - // Ensure canonical NaN value. - if math.IsNaN(p.val) { - p.val = math.Float64frombits(value.NormalNaN) - } - p.hasTS = false - switch t := p.nextToken(); t { - case tLinebreak: - break - case tTimestamp: - p.hasTS = true - if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) - } - if t2 := p.nextToken(); t2 != tLinebreak { - return EntryInvalid, p.parseError("expected next entry after timestamp", t2) - } - default: - return EntryInvalid, p.parseError("expected timestamp or new record", t) - } - return EntrySeries, nil + return p.parseMetricSuffix(t2) default: err = p.parseError("expected a valid start token", t) @@ -377,19 +387,43 @@ func (p *PromParser) Next() (Entry, error) { return EntryInvalid, err } +// parseLVals parses the contents inside the braces. 
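
The rework that follows also accepts the brace block carrying the metric name itself as a quoted string, which is how UTF-8 metric names are spelled. A sketch of that syntax in use, assuming the UTF-8 validation scheme is enabled as in the tests later in this diff (`http.requests` is a hypothetical name):

```go
package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	// Enable UTF-8 metric/label names, mirroring TestUTF8PromParse below.
	model.NameValidationScheme = model.UTF8Validation

	// The quoted string inside the braces becomes the metric name.
	p := textparse.NewPromParser([]byte(`{"http.requests",code="200"} 1024`), labels.NewSymbolTable())
	var lset labels.Labels
	for {
		e, err := p.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		if e == textparse.EntrySeries {
			p.Metric(&lset)
			fmt.Println(lset.Get(labels.MetricName)) // http.requests
			fmt.Println(lset.Get("code"))            // 200
		}
	}
}
```
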
func (p *PromParser) parseLVals() error { t := p.nextToken() for { + curTStart := p.l.start + curTI := p.l.i switch t { case tBraceClose: return nil case tLName: + case tQString: default: return p.parseError("expected label name", t) } - p.offsets = append(p.offsets, p.l.start, p.l.i) - if t := p.nextToken(); t != tEqual { + t = p.nextToken() + // A quoted string followed by a comma or brace is a metric name. Set the + // offsets and continue processing. + if t == tComma || t == tBraceClose { + if p.offsets[0] != -1 || p.offsets[1] != -1 { + return fmt.Errorf("metric name already set while parsing: %q", p.l.b[p.start:p.l.i]) + } + p.offsets[0] = curTStart + 1 + p.offsets[1] = curTI - 1 + if t == tBraceClose { + return nil + } + t = p.nextToken() + continue + } + // We have a label name, and it might be quoted. + if p.l.b[curTStart] == '"' { + curTStart++ + curTI-- + } + p.offsets = append(p.offsets, curTStart, curTI) + if t != tEqual { return p.parseError("expected equal", t) } if t := p.nextToken(); t != tLValue { @@ -403,13 +437,51 @@ func (p *PromParser) parseLVals() error { // and last character. p.offsets = append(p.offsets, p.l.start+1, p.l.i-1) - // Free trailing commas are allowed. + // Free trailing commas are allowed. NOTE: this allows spaces between label + // names, unlike in OpenMetrics. It is not clear if this is intended or an + // accidental bug. if t = p.nextToken(); t == tComma { t = p.nextToken() } } } +// parseMetricSuffix parses the end of the line after the metric name and +// labels. It starts parsing with the provided token. +func (p *PromParser) parseMetricSuffix(t token) (Entry, error) { + if p.offsets[0] == -1 { + return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + } + if t != tValue { + return EntryInvalid, p.parseError("expected value after metric", t) + } + var err error + if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + } + // Ensure canonical NaN value. + if math.IsNaN(p.val) { + p.val = math.Float64frombits(value.NormalNaN) + } + p.hasTS = false + switch t := p.nextToken(); t { + case tLinebreak: + break + case tTimestamp: + p.hasTS = true + if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + } + if t2 := p.nextToken(); t2 != tLinebreak { + return EntryInvalid, p.parseError("expected next entry after timestamp", t2) + } + default: + return EntryInvalid, p.parseError("expected timestamp or new record", t) + } + + return EntrySeries, nil +} + var lvalReplacer = strings.NewReplacer( `\"`, "\"", `\\`, "\\", @@ -421,8 +493,16 @@ var helpReplacer = strings.NewReplacer( `\n`, "\n", ) +func unreplace(s string) string { + // Replacer causes allocations. Replace only when necessary. 
+ if strings.IndexByte(s, byte('\\')) >= 0 { + return lvalReplacer.Replace(s) + } + return s +} + func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } func parseFloat(s string) (float64, error) { diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 280f39b4f1..4520dfe9a9 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -15,19 +15,36 @@ package textparse import ( "bytes" - "compress/gzip" "errors" "io" "os" + "strings" "testing" + "github.com/klauspost/compress/gzip" + "github.com/stretchr/testify/require" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/testutil" ) +type expectedParse struct { + lset labels.Labels + m string + t *int64 + v float64 + typ model.MetricType + help string + unit string + comment string + e *exemplar.Exemplar + ct *int64 +} + func TestPromParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary @@ -47,6 +64,7 @@ go_gc_duration_seconds{ quantile="1.0", a="b" } 8.3835e-05 go_gc_duration_seconds { quantile="1.0", a="b" } 8.3835e-05 go_gc_duration_seconds { quantile= "1.0", a= "b", } 8.3835e-05 go_gc_duration_seconds { quantile = "1.0", a = "b" } 8.3835e-05 +go_gc_duration_seconds { quantile = "2.0" a = "b" } 8.3835e-05 go_gc_duration_seconds_count 99 some:aggregate:rate5m{a_b="c"} 1 # HELP go_goroutines Number of goroutines that currently exist. @@ -60,21 +78,13 @@ testmetric{label="\"bar\""} 1` int64p := func(x int64) *int64 { return &x } - exp := []struct { - lset labels.Labels - m string - t *int64 - v float64 - typ MetricType - help string - comment string - }{ + exp := []expectedParse{ { m: "go_gc_duration_seconds", help: "A summary of the GC invocation durations.", }, { m: "go_gc_duration_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: `go_gc_duration_seconds{quantile="0"}`, v: 4.9351e-05, @@ -129,6 +139,11 @@ testmetric{label="\"bar\""} 1` m: `go_gc_duration_seconds { quantile = "1.0", a = "b" }`, v: 8.3835e-05, lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), + }, { + // NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed. 
+ m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"), }, { m: `go_gc_duration_seconds_count`, v: 99, @@ -142,7 +157,7 @@ testmetric{label="\"bar\""} 1` help: "Number of goroutines that currently exist.", }, { m: "go_goroutines", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { m: `go_goroutines`, v: 33, @@ -170,7 +185,15 @@ testmetric{label="\"bar\""} 1` }, } - p := NewPromParser([]byte(input)) + p := NewPromParser([]byte(input), labels.NewSymbolTable()) + checkParseResults(t, p, exp) +} + +func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { + checkParseResultsWithCT(t, p, exp, false) +} + +func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) { i := 0 var res labels.Labels @@ -188,10 +211,32 @@ testmetric{label="\"bar\""} 1` p.Metric(&res) + if ctLinesRemoved { + // Are CT series skipped? + _, typ := p.Type() + if typeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") { + t.Fatalf("expected created lines to be skipped") + } + } + require.Equal(t, exp[i].m, string(m)) require.Equal(t, exp[i].t, ts) require.Equal(t, exp[i].v, v) - require.Equal(t, exp[i].lset, res) + testutil.RequireEqual(t, exp[i].lset, res) + + var e exemplar.Exemplar + found := p.Exemplar(&e) + if exp[i].e == nil { + require.False(t, found) + } else { + require.True(t, found) + testutil.RequireEqual(t, *exp[i].e, e) + } + if ct := p.CreatedTimestamp(); ct != nil { + require.Equal(t, *exp[i].ct, *ct) + } else { + require.Nil(t, exp[i].ct) + } case EntryType: m, typ := p.Type() @@ -203,13 +248,98 @@ testmetric{label="\"bar\""} 1` require.Equal(t, exp[i].m, string(m)) require.Equal(t, exp[i].help, string(h)) + case EntryUnit: + m, u := p.Unit() + require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].unit, string(u)) + case EntryComment: require.Equal(t, exp[i].comment, string(p.Comment())) } i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) +} + +func TestUTF8PromParse(t *testing.T) { + oldValidationScheme := model.NameValidationScheme + model.NameValidationScheme = model.UTF8Validation + defer func() { + model.NameValidationScheme = oldValidationScheme + }() + + input := `# HELP "go.gc_duration_seconds" A summary of the GC invocation durations.
+# TYPE "go.gc_duration_seconds" summary +{"go.gc_duration_seconds",quantile="0"} 4.9351e-05 +{"go.gc_duration_seconds",quantile="0.25",} 7.424100000000001e-05 +{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 +{"go.gc_duration_seconds",quantile="0.8", a="b"} 8.3835e-05 +{"go.gc_duration_seconds", quantile="0.9", a="b"} 8.3835e-05 +{"go.gc_duration_seconds", quantile="1.0", a="b" } 8.3835e-05 +{ "go.gc_duration_seconds", quantile="1.0", a="b" } 8.3835e-05 +{ "go.gc_duration_seconds", quantile= "1.0", a= "b", } 8.3835e-05 +{ "go.gc_duration_seconds", quantile = "1.0", a = "b" } 8.3835e-05 +{"go.gc_duration_seconds_count"} 99 +{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0` + + exp := []expectedParse{ + { + m: "go.gc_duration_seconds", + help: "A summary of the GC invocation durations.", + }, { + m: "go.gc_duration_seconds", + typ: model.MetricTypeSummary, + }, { + m: `{"go.gc_duration_seconds",quantile="0"}`, + v: 4.9351e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + }, { + m: `{"go.gc_duration_seconds",quantile="0.25",}`, + v: 7.424100000000001e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), + }, { + m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.5", "a", "b"), + }, { + m: `{"go.gc_duration_seconds",quantile="0.8", a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.8", "a", "b"), + }, { + m: `{"go.gc_duration_seconds", quantile="0.9", a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.9", "a", "b"), + }, { + m: `{"go.gc_duration_seconds", quantile="1.0", a="b" }`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "1.0", "a", "b"), + }, { + m: `{ "go.gc_duration_seconds", quantile="1.0", a="b" }`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "1.0", "a", "b"), + }, { + m: `{ "go.gc_duration_seconds", quantile= "1.0", a= "b", }`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "1.0", "a", "b"), + }, { + m: `{ "go.gc_duration_seconds", quantile = "1.0", a = "b" }`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "1.0", "a", "b"), + }, { + m: `{"go.gc_duration_seconds_count"}`, + v: 99, + lset: labels.FromStrings("__name__", "go.gc_duration_seconds_count"), + }, { + m: `{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"}`, + v: 10.0, + lset: labels.FromStrings("__name__", `Heizölrückstoßabdämpfung 10€ metric with "interesting" {character +choices}`, "strange©™\n'quoted' \"name\"", "6"), + }, + } + + p := NewPromParser([]byte(input), labels.NewSymbolTable()) + checkParseResults(t, p, exp) } func TestPromParseErrors(t *testing.T) { @@ -237,6 +367,14 @@ func TestPromParseErrors(t *testing.T) { input: "a{b=\"\xff\"} 1\n", err: "invalid UTF-8 label value: \"\\\"\\xff\\\"\"", }, + { + input: `{"a", "b = "c"}`, + err: "expected equal, got \"c\\\"\" (\"LNAME\") while parsing: \"{\\\"a\\\", \\\"b = \\\"c\\\"\"", + }, + { + input: `{"a",b\nc="d"} 1`, + err: "expected equal, got \"\\\\\" (\"INVALID\") while parsing: \"{\\\"a\\\",b\\\\\"", + }, { input: "a true\n", err: 
"strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"a true\"", @@ -267,7 +405,7 @@ func TestPromParseErrors(t *testing.T) { }, { input: `{a="ok"} 1`, - err: "expected a valid start token, got \"{\" (\"INVALID\") while parsing: \"{\"", + err: "metric name not set while parsing: \"{a=\\\"ok\\\"} 1\"", }, { input: "# TYPE #\n#EOF\n", @@ -280,7 +418,7 @@ func TestPromParseErrors(t *testing.T) { } for i, c := range cases { - p := NewPromParser([]byte(c.input)) + p := NewPromParser([]byte(c.input), labels.NewSymbolTable()) var err error for err == nil { _, err = p.Next() @@ -334,7 +472,7 @@ func TestPromNullByteHandling(t *testing.T) { } for i, c := range cases { - p := NewPromParser([]byte(c.input)) + p := NewPromParser([]byte(c.input), labels.NewSymbolTable()) var err error for err == nil { _, err = p.Next() @@ -354,10 +492,12 @@ const ( promtestdataSampleCount = 410 ) -func BenchmarkParse(b *testing.B) { - for parserName, parser := range map[string]func([]byte) Parser{ - "prometheus": NewPromParser, - "openmetrics": NewOpenMetricsParser, +func BenchmarkPromParse(b *testing.B) { + for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ + "prometheus": NewPromParser, + "openmetrics": func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st) + }, } { for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { f, err := os.Open(fn) @@ -374,8 +514,9 @@ func BenchmarkParse(b *testing.B) { b.ReportAllocs() b.ResetTimer() + st := labels.NewSymbolTable() for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf) + p := parser(buf, st) Outer: for i < b.N { @@ -402,8 +543,9 @@ func BenchmarkParse(b *testing.B) { b.ReportAllocs() b.ResetTimer() + st := labels.NewSymbolTable() for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf) + p := parser(buf, st) Outer: for i < b.N { @@ -435,8 +577,9 @@ func BenchmarkParse(b *testing.B) { b.ReportAllocs() b.ResetTimer() + st := labels.NewSymbolTable() for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf) + p := parser(buf, st) Outer: for i < b.N { @@ -472,7 +615,7 @@ func BenchmarkParse(b *testing.B) { for i := 0; i < b.N; i += promtestdataSampleCount { decSamples := make(model.Vector, 0, 50) sdec := expfmt.SampleDecoder{ - Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText), + Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(expfmt.TypeTextPlain)), Opts: &expfmt.DecodeOptions{ Timestamp: model.TimeFromUnixNano(0), }, diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index c111bb0657..9f1400cee3 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -16,6 +16,7 @@ package textparse import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "math" @@ -23,7 +24,7 @@ import ( "unicode/utf8" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" + "github.com/gogo/protobuf/types" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -46,7 +47,7 @@ import ( // the re-arrangement work is actually causing problems (which has to be seen), // that expectation needs to be changed. type ProtobufParser struct { - in []byte // The intput to parse. + in []byte // The input to parse. inPos int // Position within the input. metricPos int // Position within Metric slice. // fieldPos is the position within a Summary or (legacy) Histogram. 
-2 @@ -55,6 +56,12 @@ type ProtobufParser struct { fieldPos int fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. + // exemplarPos is the position within the exemplars slice of a native histogram. + exemplarPos int + + // exemplarReturned is set to true each time an exemplar has been + // returned, and set back to false upon each Next() call. + exemplarReturned bool // state is marked by the entry we are processing. EntryInvalid implies // that we have to decode the next MetricFamily. @@ -64,7 +71,7 @@ type ProtobufParser struct { mf *dto.MetricFamily - // Wether to also parse a classic histogram that is also present as a + // Whether to also parse a classic histogram that is also present as a // native histogram. parseClassicHistograms bool @@ -73,13 +80,14 @@ type ProtobufParser struct { } // NewProtobufParser returns a parser for the payload in the byte slice. -func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser { +func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) Parser { return &ProtobufParser{ in: b, state: EntryInvalid, mf: &dto.MetricFamily{}, metricBytes: &bytes.Buffer{}, parseClassicHistograms: parseClassicHistograms, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), } } @@ -143,9 +151,15 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { if ts != 0 { return p.metricBytes.Bytes(), &ts, v } - // Nasty hack: Assume that ts==0 means no timestamp. That's not true in - // general, but proto3 has no distinction between unset and - // default. Need to avoid in the final format. + // TODO(beorn7): We assume here that ts==0 means no timestamp. That's + // not true in general, but proto3 originally has no distinction between + // unset and default. At a later stage, the `optional` keyword was + // (re-)introduced in proto3, but gogo-protobuf never got updated to + // support it. (Note that setting `[(gogoproto.nullable) = true]` for + // the `timestamp_ms` field doesn't help, either.) We plan to migrate + // away from gogo-protobuf to an actively maintained protobuf + // implementation. Once that's done, we can simply use the `optional` + // keyword and check for the unset state explicitly. return p.metricBytes.Bytes(), nil, v } @@ -241,27 +255,28 @@ func (p *ProtobufParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *ProtobufParser) Type() ([]byte, MetricType) { +func (p *ProtobufParser) Type() ([]byte, model.MetricType) { n := p.metricBytes.Bytes() switch p.mf.GetType() { case dto.MetricType_COUNTER: - return n, MetricTypeCounter + return n, model.MetricTypeCounter case dto.MetricType_GAUGE: - return n, MetricTypeGauge + return n, model.MetricTypeGauge case dto.MetricType_HISTOGRAM: - return n, MetricTypeHistogram + return n, model.MetricTypeHistogram case dto.MetricType_GAUGE_HISTOGRAM: - return n, MetricTypeGaugeHistogram + return n, model.MetricTypeGaugeHistogram case dto.MetricType_SUMMARY: - return n, MetricTypeSummary + return n, model.MetricTypeSummary } - return n, MetricTypeUnknown + return n, model.MetricTypeUnknown } -// Unit always returns (nil, nil) because units aren't supported by the protobuf -// format. +// Unit returns the metric unit in the current entry. 
+// Must only be called after Next returned a unit entry. +// The returned byte slices become invalid after the next call to Next. func (p *ProtobufParser) Unit() ([]byte, []byte) { - return nil, nil + return p.metricBytes.Bytes(), []byte(p.mf.GetUnit()) } // Comment always returns nil because comments aren't supported by the protobuf @@ -292,30 +307,55 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string { // Exemplar writes the exemplar of the current sample into the passed // exemplar. It returns if an exemplar exists or not. In case of a native -// histogram, the legacy bucket section is still used for exemplars. To ingest -// all examplars, call the Exemplar method repeatedly until it returns false. +// histogram, the exemplars in the native histogram will be returned. +// If this field is empty, the classic bucket section is still used for exemplars. +// To ingest all exemplars, call the Exemplar method repeatedly until it returns false. func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.exemplarReturned && p.state == EntrySeries { + // We only ever return one exemplar per (non-native-histogram) series. + return false + } m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar switch p.mf.GetType() { case dto.MetricType_COUNTER: exProto = m.GetCounter().GetExemplar() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - bb := m.GetHistogram().GetBucket() - if p.fieldPos < 0 { - if p.state == EntrySeries { - return false // At _count or _sum. + isClassic := p.state == EntrySeries + if !isClassic && len(m.GetHistogram().GetExemplars()) > 0 { + exs := m.GetHistogram().GetExemplars() + for p.exemplarPos < len(exs) { + exProto = exs[p.exemplarPos] + p.exemplarPos++ + if exProto != nil && exProto.GetTimestamp() != nil { + break + } } - p.fieldPos = 0 // Start at 1st bucket for native histograms. - } - for p.fieldPos < len(bb) { - exProto = bb[p.fieldPos].GetExemplar() - if p.state == EntrySeries { - break + if exProto != nil && exProto.GetTimestamp() == nil { + return false } - p.fieldPos++ - if exProto != nil { - break + } else { + bb := m.GetHistogram().GetBucket() + if p.fieldPos < 0 { + if isClassic { + return false // At _count or _sum. + } + p.fieldPos = 0 // Start at 1st bucket for native histograms. + } + for p.fieldPos < len(bb) { + exProto = bb[p.fieldPos].GetExemplar() + if isClassic { + break + } + p.fieldPos++ + // We deliberately drop exemplars with no timestamp only for native histograms. + if exProto != nil && (isClassic || exProto.GetTimestamp() != nil) { + break // Found a classic histogram exemplar or a native histogram exemplar with a timestamp. + } + } + // If the last exemplar for native histograms has no timestamp, ignore it. + if !isClassic && exProto.GetTimestamp() == nil { + return false } } default: @@ -335,16 +375,41 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { } p.builder.Sort() ex.Labels = p.builder.Labels() + p.exemplarReturned = true return true } +// CreatedTimestamp returns CT or nil if CT is not present or +// invalid (e.g. a negative timestamp value) on counters, summaries or histograms.
+func (p *ProtobufParser) CreatedTimestamp() *int64 { + var ct *types.Timestamp + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + case dto.MetricType_SUMMARY: + ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + default: + } + ctAsTime, err := types.TimestampFromProto(ct) + if err != nil { + // An error means ct == nil or an invalid timestamp, which we silently ignore. + return nil + } + ctMillis := ctAsTime.UnixMilli() + return &ctMillis +} + // Next advances the parser to the next "sample" (emulating the behavior of a // text format parser). It returns (EntryInvalid, io.EOF) if no samples were // read. func (p *ProtobufParser) Next() (Entry, error) { + p.exemplarReturned = false switch p.state { case EntryInvalid: p.metricPos = 0 + p.exemplarPos = 0 p.fieldPos = -2 n, err := readDelimited(p.in[p.inPos:], p.mf) p.inPos += n @@ -361,10 +426,10 @@ func (p *ProtobufParser) Next() (Entry, error) { // into metricBytes and validate only name, help, and type for now. name := p.mf.GetName() if !model.IsValidMetricName(model.LabelValue(name)) { - return EntryInvalid, errors.Errorf("invalid metric name: %s", name) + return EntryInvalid, fmt.Errorf("invalid metric name: %s", name) } if help := p.mf.GetHelp(); !utf8.ValidString(help) { - return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help) + return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help) } switch p.mf.GetType() { case dto.MetricType_COUNTER, @@ -375,7 +440,17 @@ func (p *ProtobufParser) Next() (Entry, error) { dto.MetricType_UNTYPED: // All good. default: - return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + } + unit := p.mf.GetUnit() + if len(unit) > 0 { + if p.mf.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + if !strings.HasSuffix(name[:len(name)-6], unit) || len(name)-6 < len(unit)+1 || name[len(name)-6-len(unit)-1] != '_' { + return EntryInvalid, fmt.Errorf("unit %q not a suffix of counter %q", unit, name) + } + } else if !strings.HasSuffix(name, unit) || len(name) < len(unit)+1 || name[len(name)-len(unit)-1] != '_' { + return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", unit, name) + } } p.metricBytes.Reset() p.metricBytes.WriteString(name) @@ -411,6 +486,7 @@ func (p *ProtobufParser) Next() (Entry, error) { p.metricPos++ p.fieldPos = -2 p.fieldsDone = false + p.exemplarPos = 0 // If this is a metric family containing native // histograms, we have to switch back to native // histograms after parsing a classic histogram. 
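As an illustrative aside (not part of the patch above): a minimal sketch of the gogo timestamp conversion that CreatedTimestamp relies on, assuming the github.com/gogo/protobuf/types package already imported by this file. UnixMilli truncates sub-millisecond nanos, which is why the test fixtures below expect a created timestamp of 1000 ms for seconds: 1, nanos: 1.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	ct := &types.Timestamp{Seconds: 1, Nanos: 1}
	ctAsTime, err := types.TimestampFromProto(ct)
	if err != nil {
		// A nil or out-of-range proto timestamp ends up here; the parser
		// treats that as "no created timestamp" and returns nil.
		return
	}
	fmt.Println(ctAsTime.UnixMilli()) // Prints 1000: the single nanosecond is truncated.
}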
@@ -428,7 +504,7 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, err } default: - return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state) + return EntryInvalid, fmt.Errorf("invalid protobuf parsing state: %d", p.state) } return p.state, nil } @@ -441,13 +517,13 @@ func (p *ProtobufParser) updateMetricBytes() error { b.WriteByte(model.SeparatorByte) n := lp.GetName() if !model.LabelName(n).IsValid() { - return errors.Errorf("invalid label name: %s", n) + return fmt.Errorf("invalid label name: %s", n) } b.WriteString(n) b.WriteByte(model.SeparatorByte) v := lp.GetValue() if !utf8.ValidString(v) { - return errors.Errorf("invalid label value: %s", v) + return fmt.Errorf("invalid label value: %s", v) } b.WriteString(v) } @@ -522,13 +598,13 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { } totalLength := varIntLength + int(messageLength) if totalLength > len(b) { - return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) + return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) } mf.Reset() return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) } -// formatOpenMetricsFloat works like the usual Go string formatting of a fleat +// formatOpenMetricsFloat works like the usual Go string formatting of a float // but appends ".0" if the resulting number would otherwise contain neither a // "." nor an "e". func formatOpenMetricsFloat(f float64) string { diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 53523a5dd9..cf34ae52df 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -21,11 +21,13 @@ import ( "testing" "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/testutil" dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) @@ -57,6 +59,7 @@ metric: < `name: "go_memstats_alloc_bytes_total" help: "Total number of bytes allocated, even if freed." type: COUNTER +unit: "bytes" metric: < counter: < value: 1.546544e+06 @@ -530,6 +533,232 @@ metric: < > > +`, + `name: "test_counter_with_createdtimestamp" +help: "A counter with a created timestamp." +type: COUNTER +metric: < + counter: < + value: 42 + created_timestamp: < + seconds: 1 + nanos: 1 + > + > +> + +`, + `name: "test_summary_with_createdtimestamp" +help: "A summary with a created timestamp." +type: SUMMARY +metric: < + summary: < + sample_count: 42 + sample_sum: 1.234 + created_timestamp: < + seconds: 1 + nanos: 1 + > + > +> + +`, + `name: "test_histogram_with_createdtimestamp" +help: "A histogram with a created timestamp." +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + positive_span: < + offset: 0 + length: 0 + > + > +> + +`, + `name: "test_gaugehistogram_with_createdtimestamp" +help: "A gauge histogram with a created timestamp." +type: GAUGE_HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + positive_span: < + offset: 0 + length: 0 + > + > +> + +`, + `name: "test_histogram_with_native_histogram_exemplars" +help: "A histogram with native histogram exemplars." 
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + exemplars: < + label: < + name: "dummyID" + value: "59780" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + exemplars: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + exemplars: < + label: < + name: "dummyID" + value: "59772" + > + value: -0.00052 + timestamp: < + seconds: 1625851160 + nanos: 156848499 + > + > + > + timestamp_ms: 1234568 +> + +`, + + `name: "test_histogram_with_native_histogram_exemplars2" +help: "Another histogram with native histogram exemplars." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + exemplars: < + label: < + name: "dummyID" + value: "59780" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + timestamp_ms: 1234568 +> + `, } @@ -559,13 +788,14 @@ func TestProtobufParse(t *testing.T) { m string t int64 v float64 - typ MetricType + typ model.MetricType help string unit string comment string shs *histogram.Histogram fhs *histogram.FloatHistogram e []exemplar.Exemplar + ct int64 } inputBuf := createTestProtoBuf(t) @@ -577,7 +807,7 @@ func TestProtobufParse(t *testing.T) { }{ { name: "ignore classic buckets of native histograms", - parser: NewProtobufParser(inputBuf.Bytes(), false), + parser: NewProtobufParser(inputBuf.Bytes(), false, labels.NewSymbolTable()), expected: []parseResult{ { m: "go_build_info", @@ -585,7 +815,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "go_build_info", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", @@ -600,10 +830,11 @@ func TestProtobufParse(t *testing.T) { { m: "go_memstats_alloc_bytes_total", help: "Total number of bytes allocated, even if freed.", + unit: "bytes", }, { m: "go_memstats_alloc_bytes_total", 
- typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { m: "go_memstats_alloc_bytes_total", @@ -621,7 +852,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "something_untyped", - typ: MetricTypeUnknown, + typ: model.MetricTypeUnknown, }, { m: "something_untyped", @@ -637,7 +868,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_histogram", @@ -664,7 +895,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -673,7 +903,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: "test_gauge_histogram", @@ -701,7 +931,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -710,7 +939,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_float_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_float_histogram", @@ -737,7 +966,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -746,7 +974,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_float_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: "test_gauge_float_histogram", @@ -774,7 +1002,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { @@ -783,7 +1010,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram2", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_histogram2_count", @@ -843,7 +1070,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_family", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_histogram_family\xfffoo\xffbar", @@ -887,7 +1114,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_float_histogram_with_zerothreshold_zero", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_float_histogram_with_zerothreshold_zero", @@ -911,7 +1138,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "rpc_durations_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: "rpc_durations_seconds_count\xffservice\xffexponential", @@ -962,7 +1189,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "without_quantiles", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: "without_quantiles_count", @@ -984,7 +1211,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "empty_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "empty_histogram", @@ -997,11 +1224,162 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, + { + m: "test_counter_with_createdtimestamp", + help: "A counter with a created timestamp.", + }, + { + m: 
"test_counter_with_createdtimestamp", + typ: model.MetricTypeCounter, + }, + { + m: "test_counter_with_createdtimestamp", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_counter_with_createdtimestamp", + ), + }, + { + m: "test_summary_with_createdtimestamp", + help: "A summary with a created timestamp.", + }, + { + m: "test_summary_with_createdtimestamp", + typ: model.MetricTypeSummary, + }, + { + m: "test_summary_with_createdtimestamp_count", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_count", + ), + }, + { + m: "test_summary_with_createdtimestamp_sum", + v: 1.234, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_sum", + ), + }, + { + m: "test_histogram_with_createdtimestamp", + help: "A histogram with a created timestamp.", + }, + { + m: "test_histogram_with_createdtimestamp", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_createdtimestamp", + ), + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + help: "A gauge histogram with a created timestamp.", + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_gaugehistogram_with_createdtimestamp", + ), + }, + { + m: "test_histogram_with_native_histogram_exemplars", + help: "A histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, + }, + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + 
}, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, }, }, { name: "parse classic and native buckets", - parser: NewProtobufParser(inputBuf.Bytes(), true), + parser: NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()), expected: []parseResult{ { // 0 m: "go_build_info", @@ -1009,7 +1387,7 @@ func TestProtobufParse(t *testing.T) { }, { // 1 m: "go_build_info", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { // 2 m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", @@ -1027,7 +1405,7 @@ func TestProtobufParse(t *testing.T) { }, { // 4 m: "go_memstats_alloc_bytes_total", - typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { // 5 m: "go_memstats_alloc_bytes_total", @@ -1045,7 +1423,7 @@ func TestProtobufParse(t *testing.T) { }, { // 7 m: "something_untyped", - typ: MetricTypeUnknown, + typ: model.MetricTypeUnknown, }, { // 8 m: "something_untyped", @@ -1061,7 +1439,7 @@ func TestProtobufParse(t *testing.T) { }, { // 10 m: "test_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 11 m: "test_histogram", @@ -1088,7 +1466,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 12 @@ -1155,7 +1532,7 @@ func TestProtobufParse(t *testing.T) { }, { // 19 m: "test_gauge_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { // 20 m: "test_gauge_histogram", @@ -1183,7 +1560,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 21 @@ -1250,7 +1626,7 @@ func TestProtobufParse(t *testing.T) { }, { // 28 m: "test_float_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 29 m: "test_float_histogram", @@ -1277,7 +1653,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 30 @@ -1344,7 +1719,7 @@ func TestProtobufParse(t *testing.T) { }, { // 37 m: "test_gauge_float_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { // 38 m: "test_gauge_float_histogram", @@ -1372,7 +1747,6 @@ func TestProtobufParse(t *testing.T) { ), e: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, - {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, { // 39 @@ -1439,7 +1813,7 @@ func TestProtobufParse(t *testing.T) { }, { // 46 m: "test_histogram2", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 47 m: "test_histogram2_count", @@ -1499,7 +1873,7 @@ func TestProtobufParse(t *testing.T) { }, { // 54 m: "test_histogram_family", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 55 m: "test_histogram_family\xfffoo\xffbar", @@ -1629,7 +2003,7 @@ func TestProtobufParse(t *testing.T) { }, { 
// 68 m: "test_float_histogram_with_zerothreshold_zero", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 69 m: "test_float_histogram_with_zerothreshold_zero", @@ -1653,7 +2027,7 @@ func TestProtobufParse(t *testing.T) { }, { // 71 m: "rpc_durations_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { // 72 m: "rpc_durations_seconds_count\xffservice\xffexponential", @@ -1704,7 +2078,7 @@ func TestProtobufParse(t *testing.T) { }, { // 78 m: "without_quantiles", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { // 79 m: "without_quantiles_count", @@ -1720,15 +2094,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "without_quantiles_sum", ), }, - { // 78 + { // 81 m: "empty_histogram", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", }, - { // 79 + { // 82 m: "empty_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, - { // 80 + { // 83 m: "empty_histogram", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -1739,6 +2113,267 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, + { // 84 + m: "test_counter_with_createdtimestamp", + help: "A counter with a created timestamp.", + }, + { // 85 + m: "test_counter_with_createdtimestamp", + typ: model.MetricTypeCounter, + }, + { // 86 + m: "test_counter_with_createdtimestamp", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_counter_with_createdtimestamp", + ), + }, + { // 87 + m: "test_summary_with_createdtimestamp", + help: "A summary with a created timestamp.", + }, + { // 88 + m: "test_summary_with_createdtimestamp", + typ: model.MetricTypeSummary, + }, + { // 89 + m: "test_summary_with_createdtimestamp_count", + v: 42, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_count", + ), + }, + { // 90 + m: "test_summary_with_createdtimestamp_sum", + v: 1.234, + ct: 1000, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_sum", + ), + }, + { // 91 + m: "test_histogram_with_createdtimestamp", + help: "A histogram with a created timestamp.", + }, + { // 92 + m: "test_histogram_with_createdtimestamp", + typ: model.MetricTypeHistogram, + }, + { // 93 + m: "test_histogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_createdtimestamp", + ), + }, + { // 94 + m: "test_gaugehistogram_with_createdtimestamp", + help: "A gauge histogram with a created timestamp.", + }, + { // 95 + m: "test_gaugehistogram_with_createdtimestamp", + typ: model.MetricTypeGaugeHistogram, + }, + { // 96 + m: "test_gaugehistogram_with_createdtimestamp", + ct: 1000, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_gaugehistogram_with_createdtimestamp", + ), + }, + { // 97 + m: "test_histogram_with_native_histogram_exemplars", + help: "A histogram with native histogram exemplars.", + }, + { // 98 + m: "test_histogram_with_native_histogram_exemplars", + typ: model.MetricTypeHistogram, + }, + { // 99 + m: "test_histogram_with_native_histogram_exemplars", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + 
Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, + }, + }, + { // 100 + m: "test_histogram_with_native_histogram_exemplars_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_count", + ), + }, + { // 101 + m: "test_histogram_with_native_histogram_exemplars_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_sum", + ), + }, + { // 102 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 103 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 104 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 105 + m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars_bucket", + "le", "+Inf", + ), + }, + { // 106 + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { // 107 + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { // 108 + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 109 + m: "test_histogram_with_native_histogram_exemplars2_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", 
"test_histogram_with_native_histogram_exemplars2_count", + ), + }, + { // 110 + m: "test_histogram_with_native_histogram_exemplars2_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_sum", + ), + }, + { // 111 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 112 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0003899999999999998", + ), + }, + { // 113 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0002899999999999998", + ), + }, + { // 114 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "+Inf", + ), + }, }, }, } @@ -1765,20 +2400,28 @@ func TestProtobufParse(t *testing.T) { var e exemplar.Exemplar p.Metric(&res) - found := p.Exemplar(&e) + eFound := p.Exemplar(&e) + ct := p.CreatedTimestamp() require.Equal(t, exp[i].m, string(m), "i: %d", i) if ts != nil { require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0), "i: %d", i) + require.Equal(t, int64(0), exp[i].t, "i: %d", i) } require.Equal(t, exp[i].v, v, "i: %d", i) - require.Equal(t, exp[i].lset, res, "i: %d", i) + testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) if len(exp[i].e) == 0 { - require.Equal(t, false, found, "i: %d", i) + require.False(t, eFound, "i: %d", i) + } else { + require.True(t, eFound, "i: %d", i) + testutil.RequireEqual(t, exp[i].e[0], e, "i: %d", i) + require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) + } + if exp[i].ct != 0 { + require.NotNilf(t, ct, "i: %d", i) + require.Equal(t, exp[i].ct, *ct, "i: %d", i) } else { - require.Equal(t, true, found, "i: %d", i) - require.Equal(t, exp[i].e[0], e, "i: %d", i) + require.Nilf(t, ct, "i: %d", i) } case EntryHistogram: @@ -1788,9 +2431,9 @@ func TestProtobufParse(t *testing.T) { if ts != nil { require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0), "i: %d", i) + require.Equal(t, int64(0), exp[i].t, "i: %d", i) } - require.Equal(t, exp[i].lset, res, "i: %d", i) + testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i) require.Equal(t, exp[i].m, string(m), "i: %d", i) if shs != nil { require.Equal(t, exp[i].shs, shs, "i: %d", i) @@ -1799,10 +2442,10 @@ func TestProtobufParse(t *testing.T) { } j := 0 for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { - require.Equal(t, exp[i].e[j], e, "i: %d", i) + testutil.RequireEqual(t, exp[i].e[j], e, "i: %d", i) e = exemplar.Exemplar{} } - require.Equal(t, len(exp[i].e), j, "not enough exemplars found, i: %d", i) + require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i) case EntryType: m, typ := p.Type() @@ -1825,7 +2468,7 @@ func TestProtobufParse(t *testing.T) { i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) }) } } diff --git a/notifier/notifier.go b/notifier/notifier.go index 
891372c43e..5374e73d62 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" "go.uber.org/atomic" @@ -109,10 +110,11 @@ type Manager struct { metrics *alertMetrics - more chan struct{} - mtx sync.RWMutex - ctx context.Context - cancel func() + more chan struct{} + mtx sync.RWMutex + + stopOnce *sync.Once + stopRequested chan struct{} alertmanagers map[string]*alertmanagerSet logger log.Logger @@ -120,9 +122,10 @@ type Manager struct { // Options are the configurable parameters of a Handler. type Options struct { - QueueCapacity int - ExternalLabels labels.Labels - RelabelConfigs []*relabel.Config + QueueCapacity int + DrainOnShutdown bool + ExternalLabels labels.Labels + RelabelConfigs []*relabel.Config // Used for sending HTTP requests to the Alertmanager. Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) @@ -216,8 +219,6 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp // NewManager is the manager constructor. func NewManager(o *Options, logger log.Logger) *Manager { - ctx, cancel := context.WithCancel(context.Background()) - if o.Do == nil { o.Do = do } @@ -226,12 +227,12 @@ func NewManager(o *Options, logger log.Logger) *Manager { } n := &Manager{ - queue: make([]*Alert, 0, o.QueueCapacity), - ctx: ctx, - cancel: cancel, - more: make(chan struct{}, 1), - opts: o, - logger: logger, + queue: make([]*Alert, 0, o.QueueCapacity), + more: make(chan struct{}, 1), + stopRequested: make(chan struct{}), + stopOnce: &sync.Once{}, + opts: o, + logger: logger, } queueLenFunc := func() float64 { return float64(n.queueLen()) } @@ -297,36 +298,98 @@ func (n *Manager) nextBatch() []*Alert { return alerts } -// Run dispatches notifications continuously. +// Run dispatches notifications continuously, returning once Stop has been called and all +// pending notifications have been drained from the queue (if draining is enabled). +// +// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other. +// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details. func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + n.targetUpdateLoop(tsets) + }() + + go func() { + defer wg.Done() + n.sendLoop() + n.drainQueue() + }() + + wg.Wait() + level.Info(n.logger).Log("msg", "Notification manager stopped") +} + +// sendLoop continuously consumes the notifications queue and sends alerts to +// the configured Alertmanagers. +func (n *Manager) sendLoop() { for { - // The select is split in two parts, such as we will first try to read - // new alertmanager targets if they are available, before sending new - // alerts. + // If we've been asked to stop, that takes priority over sending any further notifications. select { - case <-n.ctx.Done(): + case <-n.stopRequested: return - case ts := <-tsets: - n.reload(ts) default: select { - case <-n.ctx.Done(): + case <-n.stopRequested: return - case ts := <-tsets: - n.reload(ts) + case <-n.more: + n.sendOneBatch() + + // If the queue still has items left, kick off the next iteration. + if n.queueLen() > 0 { + n.setMore() + } } } - alerts := n.nextBatch() + } +} - if !n.sendAll(alerts...) 
{ - n.metrics.dropped.Add(float64(len(alerts))) +// targetUpdateLoop receives updates of target groups and triggers a reload. +func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) { + for { + // If we've been asked to stop, that takes priority over processing any further target group updates. + select { + case <-n.stopRequested: + return + default: + select { + case <-n.stopRequested: + return + case ts := <-tsets: + n.reload(ts) + } } - // If the queue still has items left, kick off the next iteration. + } +} + +func (n *Manager) sendOneBatch() { + alerts := n.nextBatch() + + if !n.sendAll(alerts...) { + n.metrics.dropped.Add(float64(len(alerts))) + } +} + +func (n *Manager) drainQueue() { + if !n.opts.DrainOnShutdown { if n.queueLen() > 0 { - n.setMore() + level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) + n.metrics.dropped.Add(float64(n.queueLen())) } + + return } + + level.Info(n.logger).Log("msg", "Draining any remaining notifications...") + + for n.queueLen() > 0 { + n.sendOneBatch() + } + + level.Info(n.logger).Log("msg", "Remaining notifications drained") } func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { @@ -349,7 +412,7 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() - alerts = n.relabelAlerts(alerts) + alerts = relabelAlerts(n.opts.RelabelConfigs, n.opts.ExternalLabels, alerts) if len(alerts) == 0 { return } @@ -377,20 +440,19 @@ func (n *Manager) Send(alerts ...*Alert) { n.setMore() } -// Attach external labels and process relabelling rules. -func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { +func relabelAlerts(relabelConfigs []*relabel.Config, externalLabels labels.Labels, alerts []*Alert) []*Alert { lb := labels.NewBuilder(labels.EmptyLabels()) var relabeledAlerts []*Alert for _, a := range alerts { lb.Reset(a.Labels) - n.opts.ExternalLabels.Range(func(l labels.Label) { + externalLabels.Range(func(l labels.Label) { if a.Labels.Get(l.Name) == "" { lb.Set(l.Name, l.Value) } }) - keep := relabel.ProcessBuilder(lb, n.opts.RelabelConfigs...) + keep := relabel.ProcessBuilder(lb, relabelConfigs...) if !keep { continue } @@ -472,17 +534,33 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { ) for _, ams := range amSets { var ( - payload []byte - err error + payload []byte + err error + amAlerts = alerts ) ams.mtx.RLock() + if len(ams.ams) == 0 { + ams.mtx.RUnlock() + continue + } + + if len(ams.cfg.AlertRelabelConfigs) > 0 { + amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts) + if len(amAlerts) == 0 { + ams.mtx.RUnlock() + continue + } + // We can't use the cached values from previous iteration. 
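+ // (After per-Alertmanager relabeling, amAlerts no longer matches the shared alert set, so payloads cached for other sets would be wrong here.)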
+ v1Payload, v2Payload = nil, nil + } + switch ams.cfg.APIVersion { case config.AlertmanagerAPIVersionV1: { if v1Payload == nil { - v1Payload, err = json.Marshal(alerts) + v1Payload, err = json.Marshal(amAlerts) if err != nil { level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err) ams.mtx.RUnlock() @@ -495,7 +573,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { case config.AlertmanagerAPIVersionV2: { if v2Payload == nil { - openAPIAlerts := alertsToOpenAPIAlerts(alerts) + openAPIAlerts := alertsToOpenAPIAlerts(amAlerts) v2Payload, err = json.Marshal(openAPIAlerts) if err != nil { @@ -518,24 +596,29 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { } } + if len(ams.cfg.AlertRelabelConfigs) > 0 { + // We can't use the cached values on the next iteration. + v1Payload, v2Payload = nil, nil + } + for _, am := range ams.ams { wg.Add(1) - ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout)) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout)) defer cancel() - go func(client *http.Client, url string) { + go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { if err := n.sendOne(ctx, client, url, payload); err != nil { - level.Error(n.logger).Log("alertmanager", url, "count", len(alerts), "msg", "Error sending alert", "err", err) + level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err) n.metrics.errors.WithLabelValues(url).Inc() } else { numSuccess.Inc() } n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds()) - n.metrics.sent.WithLabelValues(url).Add(float64(len(alerts))) + n.metrics.sent.WithLabelValues(url).Add(float64(len(amAlerts))) wg.Done() - }(ams.client, am.url().String()) + }(ctx, ams.client, am.url().String(), payload, len(amAlerts)) } ams.mtx.RUnlock() @@ -575,7 +658,7 @@ func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { } func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error { - req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b)) if err != nil { return err } @@ -598,10 +681,19 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b return nil } -// Stop shuts down the notification handler. +// Stop signals the notification manager to shut down and immediately returns. +// +// Run will return once the notification manager has successfully shut down. +// +// The manager will optionally drain any queued notifications before shutting down. +// +// Stop is safe to call multiple times. func (n *Manager) Stop() { level.Info(n.logger).Log("msg", "Stopping notification manager...") - n.cancel() + + n.stopOnce.Do(func() { + close(n.stopRequested) + }) } // Alertmanager holds Alertmanager endpoint information. @@ -640,6 +732,17 @@ func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metri if err != nil { return nil, err } + t := client.Transport + + if cfg.SigV4Config != nil { + t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport) + if err != nil { + return nil, err + } + } + + client.Transport = t + s := &alertmanagerSet{ client: client, cfg: cfg, @@ -667,6 +770,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { s.mtx.Lock() defer s.mtx.Unlock() + previousAms := s.ams // Set new Alertmanagers and deduplicate them along their unique URL. 
s.ams = []alertmanager{} s.droppedAms = []alertmanager{} @@ -686,6 +790,17 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { seen[us] = struct{}{} s.ams = append(s.ams, am) } + // Now remove counters for any removed Alertmanagers. + for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } } func postPath(pre string, v config.AlertmanagerAPIVersion) string { diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 66ee45c6e8..68dd445811 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -26,13 +26,17 @@ import ( "testing" "time" + "github.com/go-kit/log" "github.com/prometheus/alertmanager/api/v2/models" + "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" @@ -74,7 +78,7 @@ func TestHandlerNextBatch(t *testing.T) { for i := range make([]struct{}, 2*maxBatchSize+1) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -83,7 +87,7 @@ func TestHandlerNextBatch(t *testing.T) { require.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch())) require.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch())) require.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch())) - require.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue)) + require.Empty(t, h.queue, "Expected queue to be empty but got %d alerts", len(h.queue)) } func alertsEqual(a, b []*Alert) error { @@ -98,6 +102,41 @@ func alertsEqual(a, b []*Alert) error { return nil } +func newTestHTTPServerBuilder(expected *[]*Alert, errc chan<- error, u, p string, status *atomic.Int32) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + if err == nil { + return + } + select { + case errc <- err: + default: + } + }() + user, pass, _ := r.BasicAuth() + if user != u || pass != p { + err = fmt.Errorf("unexpected user/password: %s/%s != %s/%s", user, pass, u, p) + w.WriteHeader(http.StatusInternalServerError) + return + } + + b, err := io.ReadAll(r.Body) + if err != nil { + err = fmt.Errorf("error reading body: %w", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + var alerts []*Alert + err = json.Unmarshal(b, &alerts) + if err == nil { + err = alertsEqual(*expected, alerts) + } + w.WriteHeader(int(status.Load())) + })) +} + func TestHandlerSendAll(t *testing.T) { var ( errc = make(chan error, 1) @@ -107,42 +146,8 @@ func TestHandlerSendAll(t *testing.T) { status1.Store(int32(http.StatusOK)) status2.Store(int32(http.StatusOK)) - newHTTPServer := func(u, p string, status *atomic.Int32) *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - if err == nil { - return - } - select { - case errc <- err: - default: - } - }() - user, pass, _ := r.BasicAuth() - if user != u || 
pass != p { - err = fmt.Errorf("unexpected user/password: %s/%s != %s/%s", user, pass, u, p) - w.WriteHeader(http.StatusInternalServerError) - return - } - - b, err := io.ReadAll(r.Body) - if err != nil { - err = fmt.Errorf("error reading body: %w", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - var alerts []*Alert - err = json.Unmarshal(b, &alerts) - if err == nil { - err = alertsEqual(expected, alerts) - } - w.WriteHeader(int(status.Load())) - })) - } - server1 := newHTTPServer("prometheus", "testing_password", &status1) - server2 := newHTTPServer("", "", &status2) + server1 := newTestHTTPServerBuilder(&expected, errc, "prometheus", "testing_password", &status1) + server2 := newTestHTTPServerBuilder(&expected, errc, "", "", &status2) defer server1.Close() defer server2.Close() @@ -185,10 +190,10 @@ func TestHandlerSendAll(t *testing.T) { for i := range make([]struct{}, maxBatchSize) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) expected = append(expected, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -213,6 +218,129 @@ func TestHandlerSendAll(t *testing.T) { checkNoErr() } +func TestHandlerSendAllRemapPerAm(t *testing.T) { + var ( + errc = make(chan error, 1) + expected1 = make([]*Alert, 0, maxBatchSize) + expected2 = make([]*Alert, 0, maxBatchSize) + expected3 = make([]*Alert, 0) + + statusOK atomic.Int32 + ) + statusOK.Store(int32(http.StatusOK)) + + server1 := newTestHTTPServerBuilder(&expected1, errc, "", "", &statusOK) + server2 := newTestHTTPServerBuilder(&expected2, errc, "", "", &statusOK) + server3 := newTestHTTPServerBuilder(&expected3, errc, "", "", &statusOK) + + defer server1.Close() + defer server2.Close() + defer server3.Close() + + h := NewManager(&Options{}, nil) + h.alertmanagers = make(map[string]*alertmanagerSet) + + am1Cfg := config.DefaultAlertmanagerConfig + am1Cfg.Timeout = model.Duration(time.Second) + + am2Cfg := config.DefaultAlertmanagerConfig + am2Cfg.Timeout = model.Duration(time.Second) + am2Cfg.AlertRelabelConfigs = []*relabel.Config{ + { + SourceLabels: model.LabelNames{"alertnamedrop"}, + Action: "drop", + Regex: relabel.MustNewRegexp(".+"), + }, + } + + am3Cfg := config.DefaultAlertmanagerConfig + am3Cfg.Timeout = model.Duration(time.Second) + am3Cfg.AlertRelabelConfigs = []*relabel.Config{ + { + SourceLabels: model.LabelNames{"alertname"}, + Action: "drop", + Regex: relabel.MustNewRegexp(".+"), + }, + } + + h.alertmanagers = map[string]*alertmanagerSet{ + // Drop no alerts. + "1": { + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return server1.URL }, + }, + }, + cfg: &am1Cfg, + }, + // Drop only alerts with the "alertnamedrop" label. + "2": { + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return server2.URL }, + }, + }, + cfg: &am2Cfg, + }, + // Drop all alerts. + "3": { + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return server3.URL }, + }, + }, + cfg: &am3Cfg, + }, + // Empty list of Alertmanager endpoints. 
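+ // (sendAll should skip this set up front instead of marshalling a payload for it.)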
+ "4": { + ams: []alertmanager{}, + cfg: &config.DefaultAlertmanagerConfig, + }, + } + + for i := range make([]struct{}, maxBatchSize/2) { + h.queue = append(h.queue, + &Alert{ + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), + }, + &Alert{ + Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)), + }, + ) + + expected1 = append(expected1, + &Alert{ + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), + }, &Alert{ + Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)), + }, + ) + + expected2 = append(expected2, &Alert{ + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), + }) + } + + checkNoErr := func() { + t.Helper() + select { + case err := <-errc: + require.NoError(t, err) + default: + } + } + + require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly") + checkNoErr() + + // Verify that individual locks are released. + for k := range h.alertmanagers { + h.alertmanagers[k].mtx.Lock() + h.alertmanagers[k].ams = nil + h.alertmanagers[k].mtx.Unlock() + } +} + func TestCustomDo(t *testing.T) { const testURL = "http://testurl.com/" const testBody = "testbody" @@ -378,7 +506,7 @@ func TestHandlerQueuing(t *testing.T) { var alerts []*Alert for i := range make([]struct{}, 20*maxBatchSize) { alerts = append(alerts, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -482,7 +610,7 @@ alerting: ` err := yaml.UnmarshalStrict([]byte(s), cfg) require.NoError(t, err, "Unable to load YAML config.") - require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) err = n.ApplyConfig(cfg) require.NoError(t, err, "Error applying the config.") @@ -533,7 +661,7 @@ alerting: ` err := yaml.UnmarshalStrict([]byte(s), cfg) require.NoError(t, err, "Unable to load YAML config.") - require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) err = n.ApplyConfig(cfg) require.NoError(t, err, "Error applying the config.") @@ -573,117 +701,319 @@ func TestLabelsToOpenAPILabelSet(t *testing.T) { require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222"))) } -// TestHangingNotifier validates that targets updates happen even when there are -// queued alerts. +// TestHangingNotifier ensures that the notifier takes into account SD changes even when there are +// queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676. +// and https://github.com/prometheus/prometheus/issues/8768. func TestHangingNotifier(t *testing.T) { - // Note: When targets are not updated in time, this test is flaky because go - // selects are not deterministic. Therefore we run 10 subtests to run into the issue. - for i := 0; i < 10; i++ { - t.Run(strconv.Itoa(i), func(t *testing.T) { - var ( - done = make(chan struct{}) - changed = make(chan struct{}) - syncCh = make(chan map[string][]*targetgroup.Group) - ) - - defer func() { - close(done) - }() - - var calledOnce bool - // Setting up a bad server. This server hangs for 2 seconds. 
- badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if calledOnce { - t.Fatal("hanging server called multiple times") - } - calledOnce = true - select { - case <-done: - case <-time.After(2 * time.Second): - } - })) - badURL, err := url.Parse(badServer.URL) - require.NoError(t, err) - badAddress := badURL.Host // Used for __name__ label in targets. - - // Setting up a bad server. This server returns fast, signaling requests on - // by closing the changed channel. - goodServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - close(changed) - })) - goodURL, err := url.Parse(goodServer.URL) - require.NoError(t, err) - goodAddress := goodURL.Host // Used for __name__ label in targets. + const ( + batches = 100 + alertsCount = maxBatchSize * batches + ) - h := NewManager( - &Options{ - QueueCapacity: 20 * maxBatchSize, - }, - nil, - ) + var ( + sendTimeout = 100 * time.Millisecond + sdUpdatert = sendTimeout / 2 - h.alertmanagers = make(map[string]*alertmanagerSet) + done = make(chan struct{}) + ) - am1Cfg := config.DefaultAlertmanagerConfig - am1Cfg.Timeout = model.Duration(200 * time.Millisecond) + defer func() { + close(done) + }() - h.alertmanagers["config-0"] = &alertmanagerSet{ - ams: []alertmanager{}, - cfg: &am1Cfg, - metrics: h.metrics, - } - go h.Run(syncCh) - defer h.Stop() + // Set up a faulty Alertmanager. + var faultyCalled atomic.Bool + faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + faultyCalled.Store(true) + select { + case <-done: + case <-time.After(time.Hour): + } + })) + faultyURL, err := url.Parse(faultyServer.URL) + require.NoError(t, err) - var alerts []*Alert - for i := range make([]struct{}, 20*maxBatchSize) { - alerts = append(alerts, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), - }) + // Set up a functional Alertmanager. + var functionalCalled atomic.Bool + functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + functionalCalled.Store(true) + })) + functionalURL, err := url.Parse(functionalServer.URL) + require.NoError(t, err) + + // Initialize the discovery manager + // This is relevant as the updates aren't sent continually in real life, but only each updatert. + // The old implementation of TestHangingNotifier didn't take that into account. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + reg := prometheus.NewRegistry() + sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) + require.NoError(t, err) + sdManager := discovery.NewManager( + ctx, + log.NewNopLogger(), + reg, + sdMetrics, + discovery.Name("sd-manager"), + discovery.Updatert(sdUpdatert), + ) + go sdManager.Run() + + // Set up the notifier with both faulty and functional Alertmanagers. 
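+ // (QueueCapacity is sized to alertsCount so no alert is dropped while the faulty Alertmanager hangs.)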
+ notifier := NewManager( + &Options{ + QueueCapacity: alertsCount, + }, + nil, + ) + notifier.alertmanagers = make(map[string]*alertmanagerSet) + amCfg := config.DefaultAlertmanagerConfig + amCfg.Timeout = model.Duration(sendTimeout) + notifier.alertmanagers["config-0"] = &alertmanagerSet{ + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return faultyURL.String() }, + }, + alertmanagerMock{ + urlf: func() string { return functionalURL.String() }, + }, + }, + cfg: &amCfg, + metrics: notifier.metrics, + } + go notifier.Run(sdManager.SyncCh()) + defer notifier.Stop() + + require.Len(t, notifier.Alertmanagers(), 2) + + // Enqueue the alerts. + var alerts []*Alert + for i := range make([]struct{}, alertsCount) { + alerts = append(alerts, &Alert{ + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), + }) + } + notifier.Send(alerts...) + + // Wait for the Alertmanagers to start receiving alerts. + // 10*sdUpdatert is used as an arbitrary timeout here. + timeout := time.After(10 * sdUpdatert) +loop1: + for { + select { + case <-timeout: + t.Fatalf("Timeout waiting for the alertmanagers to be reached for the first time.") + default: + if faultyCalled.Load() && functionalCalled.Load() { + break loop1 } + } + } - // Injecting the hanging server URL. - syncCh <- map[string][]*targetgroup.Group{ - "config-0": { - { - Targets: []model.LabelSet{ - { - model.AddressLabel: model.LabelValue(badAddress), - }, + // Request to remove the faulty Alertmanager. + c := map[string]discovery.Configs{ + "config-0": { + discovery.StaticConfig{ + &targetgroup.Group{ + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(functionalURL.Host), }, }, }, + }, + }, + } + require.NoError(t, sdManager.ApplyConfig(c)) + + // The notifier should not wait until the alerts queue is empty to apply the discovery changes + // A faulty Alertmanager could cause each alert sending cycle to take up to AlertmanagerConfig.Timeout + // The queue may never be emptied, as the arrival rate could be larger than the departure rate + // It could even overflow and alerts could be dropped. + timeout = time.After(batches * sendTimeout) +loop2: + for { + select { + case <-timeout: + t.Fatalf("Timeout, the faulty alertmanager not removed on time.") + default: + // The faulty alertmanager was dropped. + if len(notifier.Alertmanagers()) == 1 { + // Prevent from TOCTOU. + require.Positive(t, notifier.queueLen()) + break loop2 } + require.Positive(t, notifier.queueLen(), "The faulty alertmanager wasn't dropped before the alerts queue was emptied.") + } + } +} - // Queing alerts. - h.Send(alerts...) +func TestStop_DrainingDisabled(t *testing.T) { + releaseReceiver := make(chan struct{}) + receiverReceivedRequest := make(chan struct{}, 2) + alertsReceived := atomic.NewInt64(0) - // Updating with a working alertmanager target. - go func() { - select { - case syncCh <- map[string][]*targetgroup.Group{ - "config-0": { - { - Targets: []model.LabelSet{ - { - model.AddressLabel: model.LabelValue(goodAddress), - }, - }, - }, - }, - }: - case <-done: - } - }() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Let the test know we've received a request. 
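+ // (receiverReceivedRequest is buffered with capacity 2, so this send never blocks the handler.)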
+ receiverReceivedRequest <- struct{}{} - select { - case <-time.After(1 * time.Second): - t.Fatalf("Timeout after 1 second, targets not synced in time.") - case <-changed: - // The good server has been hit in less than 3 seconds, therefore - // targets have been updated before a second call could be made to the - // bad server. - } - }) + var alerts []*Alert + + b, err := io.ReadAll(r.Body) + require.NoError(t, err) + + err = json.Unmarshal(b, &alerts) + require.NoError(t, err) + + alertsReceived.Add(int64(len(alerts))) + + // Wait for the test to release us. + <-releaseReceiver + + w.WriteHeader(http.StatusOK) + })) + defer func() { + server.Close() + }() + + m := NewManager( + &Options{ + QueueCapacity: 10, + DrainOnShutdown: false, + }, + nil, + ) + + m.alertmanagers = make(map[string]*alertmanagerSet) + + am1Cfg := config.DefaultAlertmanagerConfig + am1Cfg.Timeout = model.Duration(time.Second) + + m.alertmanagers["1"] = &alertmanagerSet{ + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return server.URL }, + }, + }, + cfg: &am1Cfg, } + + notificationManagerStopped := make(chan struct{}) + + go func() { + defer close(notificationManagerStopped) + m.Run(nil) + }() + + // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later. + m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")}) + + select { + case <-receiverReceivedRequest: + // Nothing more to do. + case <-time.After(time.Second): + require.FailNow(t, "gave up waiting for receiver to receive notification of first alert") + } + + m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")}) + + // Stop the notification manager, pause to allow the shutdown to be observed, and then allow the receiver to proceed. + m.Stop() + time.Sleep(time.Second) + close(releaseReceiver) + + // Wait for the notification manager to stop and confirm only the first notification was sent. + // The second notification should be dropped. + select { + case <-notificationManagerStopped: + // Nothing more to do. + case <-time.After(time.Second): + require.FailNow(t, "gave up waiting for notification manager to stop") + } + + require.Equal(t, int64(1), alertsReceived.Load()) +} + +func TestStop_DrainingEnabled(t *testing.T) { + releaseReceiver := make(chan struct{}) + receiverReceivedRequest := make(chan struct{}, 2) + alertsReceived := atomic.NewInt64(0) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Let the test know we've received a request. + receiverReceivedRequest <- struct{}{} + + var alerts []*Alert + + b, err := io.ReadAll(r.Body) + require.NoError(t, err) + + err = json.Unmarshal(b, &alerts) + require.NoError(t, err) + + alertsReceived.Add(int64(len(alerts))) + + // Wait for the test to release us. + <-releaseReceiver + + w.WriteHeader(http.StatusOK) + })) + defer func() { + server.Close() + }() + + m := NewManager( + &Options{ + QueueCapacity: 10, + DrainOnShutdown: true, + }, + nil, + ) + + m.alertmanagers = make(map[string]*alertmanagerSet) + + am1Cfg := config.DefaultAlertmanagerConfig + am1Cfg.Timeout = model.Duration(time.Second) + + m.alertmanagers["1"] = &alertmanagerSet{ + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return server.URL }, + }, + }, + cfg: &am1Cfg, + } + + notificationManagerStopped := make(chan struct{}) + + go func() { + defer close(notificationManagerStopped) + m.Run(nil) + }() + + // Queue two alerts. 
The first should be immediately sent to the receiver, which should block until we release it later.
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+
+	select {
+	case <-receiverReceivedRequest:
+		// Nothing more to do.
+	case <-time.After(time.Second):
+		require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
+	}
+
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+
+	// Stop the notification manager and allow the receiver to proceed.
+	m.Stop()
+	close(releaseReceiver)
+
+	// Wait for the notification manager to stop and confirm both notifications were sent.
+	select {
+	case <-notificationManagerStopped:
+		// Nothing more to do.
+	case <-time.After(200 * time.Millisecond):
+		require.FailNow(t, "gave up waiting for notification manager to stop")
+	}
+
+	require.Equal(t, int64(2), alertsReceived.Load())
 }
diff --git a/plugins.yml b/plugins.yml
index c10dabddb6..c7b9d297d0 100644
--- a/plugins.yml
+++ b/plugins.yml
@@ -18,5 +18,6 @@
 - github.com/prometheus/prometheus/discovery/scaleway
 - github.com/prometheus/prometheus/discovery/triton
 - github.com/prometheus/prometheus/discovery/uyuni
+- github.com/prometheus/prometheus/discovery/vultr
 - github.com/prometheus/prometheus/discovery/xds
 - github.com/prometheus/prometheus/discovery/zookeeper
diff --git a/plugins/generate.go b/plugins/generate.go
index 32a32d890b..3db6de4ab0 100644
--- a/plugins/generate.go
+++ b/plugins/generate.go
@@ -12,7 +12,6 @@
 // limitations under the License.
 
 //go:build plugins
-// +build plugins
 
 package main
 
diff --git a/prompb/codec.go b/prompb/codec.go
new file mode 100644
index 0000000000..ad30cd5e7b
--- /dev/null
+++ b/prompb/codec.go
@@ -0,0 +1,201 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prompb
+
+import (
+	"strings"
+
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
+
+// ToLabels returns model labels.Labels from timeseries' remote labels.
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
+	return labelProtosToLabels(b, m.GetLabels())
+}
+
+// ToLabels returns model labels.Labels from timeseries' remote labels.
+func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
+	return labelProtosToLabels(b, m.GetLabels())
+}
+
+func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
+	b.Reset()
+	for _, l := range labelPairs {
+		b.Add(l.Name, l.Value)
+	}
+	b.Sort()
+	return b.Labels()
+}
+
+// FromLabels transforms labels into prompb labels. The buffer slice
+// will be used to avoid allocations if it is big enough to store the labels.
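+// Note that the result aliases buf when buf has sufficient capacity, so buf
+// must not be reused while the returned labels are still in use.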
+func FromLabels(lbls labels.Labels, buf []Label) []Label { + result := buf[:0] + lbls.Range(func(l labels.Label) { + result = append(result, Label{ + Name: l.Name, + Value: l.Value, + }) + }) + return result +} + +// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. +func FromMetadataType(t model.MetricType) MetricMetadata_MetricType { + mt := strings.ToUpper(string(t)) + v, ok := MetricMetadata_MetricType_value[mt] + if !ok { + return MetricMetadata_UNKNOWN + } + return MetricMetadata_MetricType(v) +} + +// IsFloatHistogram returns true if the histogram is float. +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +// ToIntHistogram returns integer Prometheus histogram from the remote implementation +// of integer histogram. If it's a float histogram, the method returns nil. +func (h Histogram) ToIntHistogram() *histogram.Histogram { + if h.IsFloatHistogram() { + return nil + } + return &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountInt(), + Count: h.GetCountInt(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeDeltas(), + } +} + +// ToFloatHistogram returns float Prometheus histogram from the remote implementation +// of float histogram. If the underlying implementation is an integer histogram, a +// conversion is performed. +func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountFloat(), + Count: h.GetCountFloat(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeCounts(), + } + } + // Conversion from integer histogram. + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. 
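+// Bucket counts are carried over verbatim as delta-encoded integers
+// (PositiveDeltas/NegativeDeltas), the inverse of ToIntHistogram.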
+func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +// ToExemplar converts remote exemplar to model exemplar. +func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: labelProtosToLabels(b, m.GetLabels()), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/prompb/custom.go b/prompb/custom.go index 13d6e0f0cd..f73ddd446b 100644 --- a/prompb/custom.go +++ b/prompb/custom.go @@ -17,14 +17,6 @@ import ( "sync" ) -func (m Sample) T() int64 { return m.Timestamp } -func (m Sample) V() float64 { return m.Value } - -func (h Histogram) IsFloatHistogram() bool { - _, ok := h.GetCount().(*Histogram_CountFloat) - return ok -} - func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/prompb/io/prometheus/client/metrics.pb.go b/prompb/io/prometheus/client/metrics.pb.go index 83a7da7799..17cab6081e 100644 --- a/prompb/io/prometheus/client/metrics.pb.go +++ b/prompb/io/prometheus/client/metrics.pb.go @@ -172,11 +172,12 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } @@ -226,6 +227,13 @@ func (m *Counter) GetExemplar() *Exemplar { return nil } +func (m *Counter) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + type Quantile struct { 
Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` @@ -282,12 +290,13 @@ func (m *Quantile) GetValue() float64 { } type Summary struct { - SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` - SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` - Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Summary) Reset() { *m = Summary{} } @@ -344,6 +353,13 @@ func (m *Summary) GetQuantile() []Quantile { return nil } +func (m *Summary) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + type Untyped struct { Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -395,8 +411,9 @@ type Histogram struct { SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` - // Buckets for the conventional histogram. - Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + // Buckets for the classic histogram. + Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. @@ -421,11 +438,13 @@ type Histogram struct { // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. 
- PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"` + // Only used for native histograms. These exemplars MUST have a timestamp. + Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } @@ -489,6 +508,13 @@ func (m *Histogram) GetBucket() []Bucket { return nil } +func (m *Histogram) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + func (m *Histogram) GetSchema() int32 { if m != nil { return m.Schema @@ -559,6 +585,13 @@ func (m *Histogram) GetPositiveCount() []float64 { return nil } +func (m *Histogram) GetExemplars() []*Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + type Bucket struct { CumulativeCount uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount,proto3" json:"cumulative_count,omitempty"` CumulativeCountFloat float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat,proto3" json:"cumulative_count_float,omitempty"` @@ -854,6 +887,7 @@ type MetricFamily struct { Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -920,6 +954,13 @@ func (m *MetricFamily) GetMetric() []Metric { return nil } +func (m *MetricFamily) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + func init() { proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") @@ -941,65 +982,69 @@ func init() { } var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 923 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12, - 0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26, - 0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, - 0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f, - 0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80, - 0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27, - 0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58, 
- 0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41, - 0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7, - 0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5, - 0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50, - 0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68, - 0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91, - 0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf, - 0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94, - 0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36, - 0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03, - 0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1, - 0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4, - 0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 0x61, - 0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1, - 0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac, - 0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81, - 0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76, - 0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13, - 0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08, - 0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d, - 0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29, - 0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18, - 0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f, - 0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c, - 0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95, - 0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8, - 0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95, - 0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22, - 0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5, - 0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49, - 0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a, - 0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53, - 0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5, - 0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae, - 0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8, - 0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a, - 0x73, 0x15, 0xbe, 0xf2, 
0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26, - 0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 0xda, 0xa4, 0xa7, 0x0c, - 0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c, - 0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87, - 0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60, - 0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b, - 0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96, - 0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c, - 0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef, - 0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1, - 0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a, - 0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f, - 0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00, + // 982 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x8f, 0xdb, 0x44, + 0x18, 0xae, 0x9b, 0x4f, 0xbf, 0xd9, 0x6c, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90, + 0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0xb2, 0x48, 0xec, 0xb6, 0xdb, 0x14, 0x95, 0xb4, 0x65, 0x92, + 0x1c, 0xca, 0xc5, 0x9a, 0x24, 0xb3, 0x8e, 0x85, 0xbf, 0xb0, 0xc7, 0x15, 0xcb, 0x9d, 0xdf, 0xc0, + 0x1f, 0xe0, 0x67, 0x70, 0x46, 0x3d, 0x72, 0xe2, 0x88, 0xd0, 0xfe, 0x0e, 0x0e, 0x68, 0xbe, 0xec, + 0x6c, 0xe5, 0x2c, 0x2c, 0xdc, 0x3c, 0x8f, 0x9f, 0x67, 0xe6, 0x79, 0x1f, 0xdb, 0xef, 0x6b, 0x70, + 0xfc, 0x78, 0x94, 0xa4, 0x71, 0x48, 0xd9, 0x9a, 0xe6, 0xd9, 0x68, 0x19, 0xf8, 0x34, 0x62, 0xa3, + 0x90, 0xb2, 0xd4, 0x5f, 0x66, 0xc3, 0x24, 0x8d, 0x59, 0x8c, 0x7a, 0x7e, 0x3c, 0x2c, 0x39, 0x43, + 0xc9, 0xd9, 0xef, 0x79, 0xb1, 0x17, 0x0b, 0xc2, 0x88, 0x5f, 0x49, 0xee, 0x7e, 0xdf, 0x8b, 0x63, + 0x2f, 0xa0, 0x23, 0xb1, 0x5a, 0xe4, 0xe7, 0x23, 0xe6, 0x87, 0x34, 0x63, 0x24, 0x4c, 0x24, 0xc1, + 0xf9, 0x18, 0xcc, 0xaf, 0xc8, 0x82, 0x06, 0xcf, 0x89, 0x9f, 0x22, 0x04, 0xf5, 0x88, 0x84, 0xd4, + 0x36, 0x06, 0xc6, 0x91, 0x89, 0xc5, 0x35, 0xea, 0x41, 0xe3, 0x25, 0x09, 0x72, 0x6a, 0xdf, 0x16, + 0xa0, 0x5c, 0x38, 0x07, 0xd0, 0x18, 0x93, 0xdc, 0xdb, 0xb8, 0xcd, 0x35, 0x86, 0xbe, 0xfd, 0xb3, + 0x01, 0xad, 0x07, 0x71, 0x1e, 0x31, 0x9a, 0x56, 0x33, 0xd0, 0x7d, 0x68, 0xd3, 0xef, 0x69, 0x98, + 0x04, 0x24, 0x15, 0x3b, 0x77, 0x3e, 0x3c, 0x1c, 0x56, 0xd5, 0x35, 0x3c, 0x53, 0x2c, 0x5c, 0xf0, + 0xd1, 0x18, 0xf6, 0x96, 0x29, 0x25, 0x8c, 0xae, 0xdc, 0xa2, 0x1c, 0xbb, 0x26, 0x36, 0xd9, 0x1f, + 0xca, 0x82, 0x87, 0xba, 0xe0, 0xe1, 0x4c, 0x33, 0xb0, 0xa5, 0x44, 0x05, 0xe2, 0x1c, 0x43, 0xfb, + 0xeb, 0x9c, 0x44, 0xcc, 0x0f, 0x28, 0xda, 0x87, 0xf6, 0x77, 0xea, 0x5a, 0x39, 0x2d, 0xd6, 0x57, + 0x33, 0x28, 0x8a, 0xfc, 0xdd, 0x80, 0xd6, 0x34, 0x0f, 0x43, 0x92, 0x5e, 0xa0, 0xb7, 0x61, 0x27, + 0x23, 0x61, 0x12, 0x50, 0x77, 0xc9, 0xcb, 0x16, 0x3b, 0xd4, 0x71, 0x47, 0x62, 0x22, 0x09, 0x74, + 0x00, 0xa0, 0x28, 0x59, 0x1e, 0xaa, 0x9d, 0x4c, 0x89, 0x4c, 0xf3, 0x10, 0x7d, 0xb1, 0x71, 0x7e, + 0x6d, 0x50, 0xdb, 0x1e, 0x88, 0x76, 0x7c, 0x5a, 0x7f, 0xf5, 0x47, 0xff, 0xd6, 0x86, 0xcb, 0xca, + 0x58, 0xea, 0xff, 0x21, 0x96, 
0x3e, 0xb4, 0xe6, 0x11, 0xbb, 0x48, 0xe8, 0x6a, 0xcb, 0xe3, 0xfd, + 0xab, 0x01, 0xe6, 0x63, 0x3f, 0x63, 0xb1, 0x97, 0x92, 0xf0, 0xdf, 0xd4, 0xfe, 0x3e, 0xa0, 0x4d, + 0x8a, 0x7b, 0x1e, 0xc4, 0x84, 0x09, 0x6f, 0x06, 0xb6, 0x36, 0x88, 0x8f, 0x38, 0xfe, 0x4f, 0x49, + 0xdd, 0x87, 0xe6, 0x22, 0x5f, 0x7e, 0x4b, 0x99, 0xca, 0xe9, 0xad, 0xea, 0x9c, 0x4e, 0x05, 0x47, + 0xa5, 0xa4, 0x14, 0xd5, 0x19, 0xdd, 0xb9, 0x79, 0x46, 0xe8, 0x2e, 0x34, 0xb3, 0xe5, 0x9a, 0x86, + 0xc4, 0x6e, 0x0c, 0x8c, 0xa3, 0x3d, 0xac, 0x56, 0xe8, 0x1d, 0xd8, 0xfd, 0x81, 0xa6, 0xb1, 0xcb, + 0xd6, 0x29, 0xcd, 0xd6, 0x71, 0xb0, 0xb2, 0x9b, 0xc2, 0x7f, 0x97, 0xa3, 0x33, 0x0d, 0xf2, 0x12, + 0x05, 0x4d, 0x26, 0xd6, 0x12, 0x89, 0x99, 0x1c, 0x91, 0x79, 0x1d, 0x81, 0x55, 0xde, 0x56, 0x69, + 0xb5, 0xc5, 0x3e, 0xbb, 0x05, 0x49, 0x66, 0xf5, 0x04, 0xba, 0x11, 0xf5, 0x08, 0xf3, 0x5f, 0x52, + 0x37, 0x4b, 0x48, 0x64, 0x9b, 0x22, 0x93, 0xc1, 0x75, 0x99, 0x4c, 0x13, 0x12, 0xa9, 0x5c, 0x76, + 0xb4, 0x98, 0x63, 0xdc, 0x7c, 0xb1, 0xd9, 0x8a, 0x06, 0x8c, 0xd8, 0x30, 0xa8, 0x1d, 0x21, 0x5c, + 0x1c, 0xf1, 0x90, 0x83, 0x57, 0x68, 0xb2, 0x80, 0xce, 0xa0, 0xc6, 0x6b, 0xd4, 0xa8, 0x2c, 0xe2, + 0x09, 0x74, 0x93, 0x38, 0xf3, 0x4b, 0x6b, 0x3b, 0x37, 0xb3, 0xa6, 0xc5, 0xda, 0x5a, 0xb1, 0x99, + 0xb4, 0xd6, 0x95, 0xd6, 0x34, 0x5a, 0x58, 0x2b, 0x68, 0xd2, 0xda, 0xae, 0xb4, 0xa6, 0x51, 0x69, + 0xed, 0x18, 0x4c, 0xdd, 0x4d, 0x32, 0xdb, 0xba, 0xee, 0x6b, 0x2b, 0xda, 0x4f, 0x29, 0x70, 0x7e, + 0x35, 0xa0, 0x29, 0xed, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x86, 0x21, 0xdf, 0xff, + 0x3b, 0x25, 0x2e, 0xcf, 0xbc, 0x07, 0x77, 0x5f, 0xa7, 0x5e, 0xf9, 0x0e, 0x7a, 0xaf, 0x09, 0xe4, + 0xf3, 0xed, 0x43, 0x27, 0x4f, 0x12, 0x9a, 0xba, 0x8b, 0x38, 0x8f, 0x56, 0xea, 0x63, 0x00, 0x01, + 0x9d, 0x72, 0xe4, 0x4a, 0x23, 0xad, 0xdd, 0xac, 0x91, 0x3a, 0xc7, 0x00, 0x65, 0xec, 0xfc, 0x95, + 0x8e, 0xcf, 0xcf, 0x33, 0x2a, 0x2b, 0xd8, 0xc3, 0x6a, 0xc5, 0xf1, 0x80, 0x46, 0x1e, 0x5b, 0x8b, + 0xd3, 0xbb, 0x58, 0xad, 0x9c, 0x9f, 0x0c, 0x68, 0xeb, 0x4d, 0xd1, 0x67, 0xd0, 0x08, 0xf8, 0x1c, + 0xb1, 0x0d, 0x91, 0x66, 0xbf, 0xda, 0x43, 0x31, 0x6a, 0xd4, 0x33, 0x96, 0x9a, 0xea, 0xfe, 0x8a, + 0x3e, 0x05, 0xf3, 0x26, 0xed, 0xbd, 0x24, 0x3b, 0x3f, 0xd6, 0xa0, 0x39, 0x11, 0x33, 0xf3, 0xff, + 0xf9, 0xfa, 0x00, 0x1a, 0x1e, 0x9f, 0x72, 0x6a, 0x42, 0xbd, 0x59, 0x2d, 0x16, 0x83, 0x10, 0x4b, + 0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x39, 0xf8, 0x94, 0xe5, 0x83, 0x6a, 0x91, 0x9a, 0x8e, 0x58, 0xb3, + 0xb9, 0x30, 0x93, 0xc3, 0x44, 0xf5, 0xec, 0x2d, 0x42, 0x35, 0x71, 0xb0, 0x66, 0x73, 0x61, 0x2e, + 0xbb, 0xb5, 0x68, 0x45, 0x5b, 0x85, 0xaa, 0xa5, 0x63, 0xcd, 0x46, 0x9f, 0x83, 0xb9, 0xd6, 0x4d, + 0x5c, 0xb4, 0xa0, 0xad, 0xf1, 0x14, 0xbd, 0x1e, 0x97, 0x0a, 0xde, 0xf6, 0x8b, 0xc4, 0xdd, 0x30, + 0x13, 0x7d, 0xae, 0x86, 0x3b, 0x05, 0x36, 0xc9, 0x9c, 0x5f, 0x0c, 0xd8, 0x91, 0xcf, 0xe1, 0x11, + 0x09, 0xfd, 0xe0, 0xa2, 0xf2, 0x07, 0x03, 0x41, 0x7d, 0x4d, 0x83, 0x44, 0xfd, 0x5f, 0x88, 0x6b, + 0x74, 0x0f, 0xea, 0xdc, 0xa3, 0x88, 0x70, 0x77, 0x5b, 0xc7, 0x90, 0x3b, 0xcf, 0x2e, 0x12, 0x8a, + 0x05, 0x9b, 0x0f, 0x06, 0xf9, 0xa7, 0x64, 0xd7, 0xaf, 0x1b, 0x0c, 0x52, 0xa7, 0x07, 0x83, 0x54, + 0x70, 0x17, 0x79, 0xe4, 0x33, 0x11, 0xa1, 0x89, 0xc5, 0xf5, 0x7b, 0x0b, 0x80, 0xf2, 0x0c, 0xd4, + 0x81, 0xd6, 0x83, 0x67, 0xf3, 0xa7, 0xb3, 0x33, 0x6c, 0xdd, 0x42, 0x26, 0x34, 0xc6, 0x27, 0xf3, + 0xf1, 0x99, 0x65, 0x70, 0x7c, 0x3a, 0x9f, 0x4c, 0x4e, 0xf0, 0x0b, 0xeb, 0x36, 0x5f, 0xcc, 0x9f, + 0xce, 0x5e, 0x3c, 0x3f, 0x7b, 0x68, 0xd5, 0x50, 0x17, 0xcc, 0xc7, 0x5f, 0x4e, 0x67, 0xcf, 0xc6, + 0xf8, 0x64, 0x62, 0xd5, 0xd1, 0x1b, 0x70, 0x47, 0x68, 
0xdc, 0x12, 0x6c, 0x9c, 0x3a, 0xaf, 0x2e, + 0x0f, 0x8d, 0xdf, 0x2e, 0x0f, 0x8d, 0x3f, 0x2f, 0x0f, 0x8d, 0x6f, 0x7a, 0x7e, 0xec, 0x96, 0x86, + 0x5d, 0x69, 0x78, 0xd1, 0x14, 0x6f, 0xfb, 0x47, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xe1, + 0xcf, 0xb8, 0x1d, 0x0a, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -1100,6 +1145,18 @@ func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Exemplar != nil { { size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) @@ -1184,6 +1241,18 @@ func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if len(m.Quantile) > 0 { for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- { { @@ -1269,32 +1338,60 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } if len(m.PositiveCount) > 0 { for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- { - f2 := math.Float64bits(float64(m.PositiveCount[iNdEx])) + f5 := math.Float64bits(float64(m.PositiveCount[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) } i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8)) i-- dAtA[i] = 0x72 } if len(m.PositiveDelta) > 0 { - var j3 int - dAtA5 := make([]byte, len(m.PositiveDelta)*10) + var j6 int + dAtA8 := make([]byte, len(m.PositiveDelta)*10) for _, num := range m.PositiveDelta { - x4 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x4 >= 1<<7 { - dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) - j3++ - x4 >>= 7 - } - dAtA5[j3] = uint8(x4) - j3++ - } - i -= j3 - copy(dAtA[i:], dAtA5[:j3]) - i = encodeVarintMetrics(dAtA, i, uint64(j3)) + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintMetrics(dAtA, i, uint64(j6)) i-- dAtA[i] = 0x6a } @@ -1314,30 +1411,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.NegativeCount) > 0 { for iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- { - f6 := math.Float64bits(float64(m.NegativeCount[iNdEx])) + f9 := math.Float64bits(float64(m.NegativeCount[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], 
uint64(f9)) } i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8)) i-- dAtA[i] = 0x5a } if len(m.NegativeDelta) > 0 { - var j7 int - dAtA9 := make([]byte, len(m.NegativeDelta)*10) + var j10 int + dAtA12 := make([]byte, len(m.NegativeDelta)*10) for _, num := range m.NegativeDelta { - x8 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x8 >= 1<<7 { - dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) - j7++ - x8 >>= 7 - } - dAtA9[j7] = uint8(x8) - j7++ - } - i -= j7 - copy(dAtA[i:], dAtA9[:j7]) - i = encodeVarintMetrics(dAtA, i, uint64(j7)) + x11 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x11 >= 1<<7 { + dAtA12[j10] = uint8(uint64(x11)&0x7f | 0x80) + j10++ + x11 >>= 7 + } + dAtA12[j10] = uint8(x11) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA12[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) i-- dAtA[i] = 0x52 } @@ -1693,6 +1790,13 @@ func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } if len(m.Metric) > 0 { for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { { @@ -1788,6 +1892,10 @@ func (m *Counter) Size() (n int) { l = m.Exemplar.Size() n += 1 + l + sovMetrics(uint64(l)) } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1830,6 +1938,10 @@ func (m *Summary) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1916,6 +2028,16 @@ func (m *Histogram) Size() (n int) { if len(m.PositiveCount) > 0 { n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8 } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2054,6 +2176,10 @@ func (m *MetricFamily) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2319,6 +2445,42 @@ func (m *Counter) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -2507,6 +2669,42 @@ func (m *Summary) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -3089,6 +3287,76 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { } else { return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType) } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -3892,6 +4160,38 @@ func (m *MetricFamily) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) diff --git a/prompb/io/prometheus/client/metrics.proto b/prompb/io/prometheus/client/metrics.proto index 3fef2b6d00..fe55638bb7 100644 --- 
a/prompb/io/prometheus/client/metrics.proto
+++ b/prompb/io/prometheus/client/metrics.proto
@@ -51,6 +51,8 @@ message Gauge {
 message Counter {
   double value = 1;
   Exemplar exemplar = 2;
+
+  google.protobuf.Timestamp created_timestamp = 3;
 }
 
 message Quantile {
@@ -62,6 +64,8 @@ message Summary {
   uint64 sample_count = 1;
   double sample_sum = 2;
   repeated Quantile quantile = 3 [(gogoproto.nullable) = false];
+
+  google.protobuf.Timestamp created_timestamp = 4;
 }
 
 message Untyped {
@@ -72,9 +76,11 @@ message Histogram {
   uint64 sample_count = 1;
   double sample_count_float = 4; // Overrides sample_count if > 0.
   double sample_sum = 2;
-  // Buckets for the conventional histogram.
+  // Buckets for the classic histogram.
   repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
 
+  google.protobuf.Timestamp created_timestamp = 15;
+
   // Everything below here is for native histograms (also known as sparse histograms).
   // Native histograms are an experimental feature without stability guarantees.
 
@@ -106,6 +112,9 @@ message Histogram {
   // histograms.
   repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
   repeated double positive_count = 14; // Absolute count of each bucket.
+
+  // Only used for native histograms. These exemplars MUST have a timestamp.
+  repeated Exemplar exemplars = 16;
 }
 
 message Bucket {
@@ -147,4 +156,5 @@ message MetricFamily {
   string help = 2;
   MetricType type = 3;
   repeated Metric metric = 4 [(gogoproto.nullable) = false];
+  string unit = 5;
 }
diff --git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go
new file mode 100644
index 0000000000..25fa0d4035
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/codec.go
@@ -0,0 +1,216 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import (
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
+)
+
+// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
+
+// ToLabels returns model labels.Labels from timeseries' remote labels.
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+	return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
+}
+
+// ToMetadata returns model metadata from timeseries' remote metadata.
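+// Unit and help strings are resolved through the request's symbols table;
+// an unrecognized metric type maps to model.MetricTypeUnknown.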
+func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata { + typ := model.MetricTypeUnknown + switch m.Metadata.Type { + case Metadata_METRIC_TYPE_COUNTER: + typ = model.MetricTypeCounter + case Metadata_METRIC_TYPE_GAUGE: + typ = model.MetricTypeGauge + case Metadata_METRIC_TYPE_HISTOGRAM: + typ = model.MetricTypeHistogram + case Metadata_METRIC_TYPE_GAUGEHISTOGRAM: + typ = model.MetricTypeGaugeHistogram + case Metadata_METRIC_TYPE_SUMMARY: + typ = model.MetricTypeSummary + case Metadata_METRIC_TYPE_INFO: + typ = model.MetricTypeInfo + case Metadata_METRIC_TYPE_STATESET: + typ = model.MetricTypeStateset + } + return metadata.Metadata{ + Type: typ, + Unit: symbols[m.Metadata.UnitRef], + Help: symbols[m.Metadata.HelpRef], + } +} + +// FromMetadataType transforms a Prometheus metricType into writev2 metricType. +// Since the former is a string we need to transform it to an enum. +func FromMetadataType(t model.MetricType) Metadata_MetricType { + switch t { + case model.MetricTypeCounter: + return Metadata_METRIC_TYPE_COUNTER + case model.MetricTypeGauge: + return Metadata_METRIC_TYPE_GAUGE + case model.MetricTypeHistogram: + return Metadata_METRIC_TYPE_HISTOGRAM + case model.MetricTypeGaugeHistogram: + return Metadata_METRIC_TYPE_GAUGEHISTOGRAM + case model.MetricTypeSummary: + return Metadata_METRIC_TYPE_SUMMARY + case model.MetricTypeInfo: + return Metadata_METRIC_TYPE_INFO + case model.MetricTypeStateset: + return Metadata_METRIC_TYPE_STATESET + default: + return Metadata_METRIC_TYPE_UNSPECIFIED + } +} + +// IsFloatHistogram returns true if the histogram is float. +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +// ToIntHistogram returns integer Prometheus histogram from the remote implementation +// of integer histogram. If it's a float histogram, the method returns nil. +func (h Histogram) ToIntHistogram() *histogram.Histogram { + if h.IsFloatHistogram() { + return nil + } + return &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountInt(), + Count: h.GetCountInt(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeDeltas(), + CustomValues: h.GetCustomValues(), + } +} + +// ToFloatHistogram returns float Prometheus histogram from the remote implementation +// of float histogram. If the underlying implementation is an integer histogram, a +// conversion is performed. +func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountFloat(), + Count: h.GetCountFloat(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeCounts(), + CustomValues: h.GetCustomValues(), + } + } + // Conversion from integer histogram. 
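+	// Delta-encoded integer buckets are accumulated into absolute float
+	// counts via deltasToCounts; scalar fields are cast directly.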
+ return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + CustomValues: h.GetCustomValues(), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. +func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + CustomValues: h.CustomValues, + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + CustomValues: fh.CustomValues, + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: desymbolizeLabels(b, m.LabelsRefs, symbols), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/prompb/io/prometheus/write/v2/custom.go b/prompb/io/prometheus/write/v2/custom.go new file mode 100644 index 0000000000..3aa778eb60 --- /dev/null +++ b/prompb/io/prometheus/write/v2/custom.go @@ -0,0 +1,165 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "slices" +) + +func (m Sample) T() int64 { return m.Timestamp } +func (m Sample) V() float64 { return m.Value } + +func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) { + siz := m.Size() + if cap(dst) < siz { + dst = make([]byte, siz) + } + n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz]) + if err != nil { + return nil, err + } + return dst[:n], nil +} + +// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer, +// but calls OptimizedMarshalToSizedBuffer on the timeseries. +func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer, +// but marshals m.LabelsRefs in place without extra allocations. +func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + + if len(m.LabelsRefs) > 0 { + // This is the trick: encode the varints in reverse order to make it easier + // to do it in place. Then reverse the whole thing. 
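+		// Writing backwards emits each varint's bytes in reverse, so a single
+		// slices.Reverse over the packed region restores both the byte order
+		// within each varint and the original order of the values.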
+ var j10 int + start := i + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + i-- + j10++ + } + dAtA[i-1] = uint8(num) + i-- + j10++ + } + slices.Reverse(dAtA[i:start]) + // --- end of trick + + i = encodeVarintTypes(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} diff --git a/prompb/io/prometheus/write/v2/custom_test.go b/prompb/io/prometheus/write/v2/custom_test.go new file mode 100644 index 0000000000..139cbfb225 --- /dev/null +++ b/prompb/io/prometheus/write/v2/custom_test.go @@ -0,0 +1,97 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package writev2 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOptimizedMarshal(t *testing.T) { + for _, tt := range []struct { + name string + m *Request + }{ + { + name: "empty", + m: &Request{}, + }, + { + name: "simple", + m: &Request{ + Timeseries: []TimeSeries{ + { + LabelsRefs: []uint32{ + 0, 1, + 2, 3, + 4, 5, + 6, 7, + 8, 9, + 10, 11, + 12, 13, + 14, 15, + }, + + Samples: []Sample{{Value: 1, Timestamp: 0}}, + Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}}, + Histograms: nil, + }, + { + LabelsRefs: []uint32{ + 0, 1, + 2, 3, + 4, 5, + 6, 7, + 8, 9, + 10, 11, + 12, 13, + 14, 15, + }, + Samples: []Sample{{Value: 2, Timestamp: 1}}, + Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}}, + Histograms: nil, + }, + }, + Symbols: []string{ + "a", "b", + "c", "d", + "e", "f", + "g", "h", + "i", "j", + "k", "l", + "m", "n", + "o", "p", + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + // Keep the slice allocated to mimic what std Marshal + // would give to sized Marshal. + got := make([]byte, 0) + + // Should be the same as the standard marshal. + expected, err := tt.m.Marshal() + require.NoError(t, err) + got, err = tt.m.OptimizedMarshal(got) + require.NoError(t, err) + require.Equal(t, expected, got) + + // Unmarshal should work too. + m := &Request{} + require.NoError(t, m.Unmarshal(got)) + require.Equal(t, tt.m, m) + }) + } +} diff --git a/prompb/io/prometheus/write/v2/symbols.go b/prompb/io/prometheus/write/v2/symbols.go new file mode 100644 index 0000000000..f316a976f2 --- /dev/null +++ b/prompb/io/prometheus/write/v2/symbols.go @@ -0,0 +1,83 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package writev2 + +import "github.com/prometheus/prometheus/model/labels" + +// SymbolsTable implements a table for easy symbol use. +type SymbolsTable struct { + strings []string + symbolsMap map[string]uint32 +} + +// NewSymbolTable returns a symbol table. +func NewSymbolTable() SymbolsTable { + return SymbolsTable{ + // Empty string is required as a first element. + symbolsMap: map[string]uint32{"": 0}, + strings: []string{""}, + } +} + +// Symbolize adds (if not added before) a string to the symbols table, +// returning its reference number. +func (t *SymbolsTable) Symbolize(str string) uint32 { + if ref, ok := t.symbolsMap[str]; ok { + return ref + } + ref := uint32(len(t.strings)) + t.strings = append(t.strings, str) + t.symbolsMap[str] = ref + return ref +} + +// SymbolizeLabels symbolizes Prometheus labels. +func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 { + result := buf[:0] + lbls.Range(func(l labels.Label) { + off := t.Symbolize(l.Name) + result = append(result, off) + off = t.Symbolize(l.Value) + result = append(result, off) + }) + return result +} + +// Symbols returns the computed symbols table to put in e.g. Request.Symbols. +// As per spec, order does not matter. +func (t *SymbolsTable) Symbols() []string { + return t.strings +} + +// Reset clears the symbols table. +func (t *SymbolsTable) Reset() { + // NOTE: Make sure to keep the empty symbol. + t.strings = t.strings[:1] + for k := range t.symbolsMap { + if k == "" { + continue + } + delete(t.symbolsMap, k) + } +} + +// desymbolizeLabels decodes label references into labels, using the given symbols. +func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels { + b.Reset() + for i := 0; i < len(labelRefs); i += 2 { + b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]]) + } + b.Sort() + return b.Labels() +} diff --git a/prompb/io/prometheus/write/v2/symbols_test.go b/prompb/io/prometheus/write/v2/symbols_test.go new file mode 100644 index 0000000000..3d852e88f1 --- /dev/null +++ b/prompb/io/prometheus/write/v2/symbols_test.go @@ -0,0 +1,60 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package writev2 + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" +) + +func TestSymbolsTable(t *testing.T) { + s := NewSymbolTable() + require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist") + require.Equal(t, uint32(0), s.Symbolize("")) + require.Equal(t, []string{""}, s.Symbols()) + + require.Equal(t, uint32(1), s.Symbolize("abc")) + require.Equal(t, []string{"", "abc"}, s.Symbols()) + + require.Equal(t, uint32(2), s.Symbolize("__name__")) + require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols()) + + require.Equal(t, uint32(3), s.Symbolize("foo")) + require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols()) + + s.Reset() + require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist") + require.Equal(t, uint32(0), s.Symbolize("")) + + require.Equal(t, uint32(1), s.Symbolize("__name__")) + require.Equal(t, []string{"", "__name__"}, s.Symbols()) + + require.Equal(t, uint32(2), s.Symbolize("abc")) + require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols()) + + ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234") + encoded := s.SymbolizeLabels(ls, nil) + require.Equal(t, []uint32{1, 3, 4, 5}, encoded) + b := labels.NewScratchBuilder(len(encoded)) + decoded := desymbolizeLabels(&b, encoded, s.Symbols()) + require.Equal(t, ls, decoded) + + // Different buf. + ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234") + encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5}) + require.Equal(t, []uint32{1, 3, 6, 5}, encoded) +} diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go new file mode 100644 index 0000000000..3420d20e25 --- /dev/null +++ b/prompb/io/prometheus/write/v2/types.pb.go @@ -0,0 +1,3236 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: io/prometheus/write/v2/types.proto + +package writev2 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Metadata_MetricType int32 + +const ( + Metadata_METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0 + Metadata_METRIC_TYPE_COUNTER Metadata_MetricType = 1 + Metadata_METRIC_TYPE_GAUGE Metadata_MetricType = 2 + Metadata_METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3 + Metadata_METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4 + Metadata_METRIC_TYPE_SUMMARY Metadata_MetricType = 5 + Metadata_METRIC_TYPE_INFO Metadata_MetricType = 6 + Metadata_METRIC_TYPE_STATESET Metadata_MetricType = 7 +) + +var Metadata_MetricType_name = map[int32]string{ + 0: "METRIC_TYPE_UNSPECIFIED", + 1: "METRIC_TYPE_COUNTER", + 2: "METRIC_TYPE_GAUGE", + 3: "METRIC_TYPE_HISTOGRAM", + 4: "METRIC_TYPE_GAUGEHISTOGRAM", + 5: "METRIC_TYPE_SUMMARY", + 6: "METRIC_TYPE_INFO", + 7: "METRIC_TYPE_STATESET", +} + +var Metadata_MetricType_value = map[string]int32{ + "METRIC_TYPE_UNSPECIFIED": 0, + "METRIC_TYPE_COUNTER": 1, + "METRIC_TYPE_GAUGE": 2, + "METRIC_TYPE_HISTOGRAM": 3, + "METRIC_TYPE_GAUGEHISTOGRAM": 4, + "METRIC_TYPE_SUMMARY": 5, + "METRIC_TYPE_INFO": 6, + "METRIC_TYPE_STATESET": 7, +} + +func (x Metadata_MetricType) String() string { + return proto.EnumName(Metadata_MetricType_name, int32(x)) +} + +func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{4, 0} +} + +type Histogram_ResetHint int32 + +const ( + Histogram_RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0 + Histogram_RESET_HINT_YES Histogram_ResetHint = 1 + Histogram_RESET_HINT_NO Histogram_ResetHint = 2 + Histogram_RESET_HINT_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "RESET_HINT_UNSPECIFIED", + 1: "RESET_HINT_YES", + 2: "RESET_HINT_NO", + 3: "RESET_HINT_GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "RESET_HINT_UNSPECIFIED": 0, + "RESET_HINT_YES": 1, + "RESET_HINT_NO": 2, + "RESET_HINT_GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{5, 0} +} + +// Request represents a request to write the given timeseries to a remote destination. +// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// NOTE: gogoproto options might change in the future for this file; they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908 +type Request struct { + // symbols contains a de-duplicated array of string elements used for various + // items in a Request message, like labels and metadata items. For the sender's convenience + // around empty values for optional fields like unit_ref, the symbols array MUST start with + // an empty string. + // + // To decode each of the symbolized strings, referenced by the "ref(s)" suffix, you + // need to look up the actual string by index from the symbols array. The order of + // strings is up to the sender. The receiver should not assume any particular encoding. + Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"` + // timeseries represents an array of distinct series with 0 or more samples.
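+ // For example, with symbols ["", "__name__", "up", "job", "prometheus"], a + // series with labels_refs [1, 2, 3, 4] decodes to the label set + // {__name__="up", job="prometheus"}.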
+ Timeseries []TimeSeries `protobuf:"bytes,5,rep,name=timeseries,proto3" json:"timeseries"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetSymbols() []string { + if m != nil { + return m.Symbols + } + return nil +} + +func (m *Request) GetTimeseries() []TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +// TimeSeries represents a single series. +type TimeSeries struct { + // labels_refs is a list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // + // Note that there might be multiple TimeSeries objects in the same + // Request with the same labels, e.g. for different exemplars, metadata + // or created timestamp. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"` + // exemplars represents an optional set of exemplars attached to this series' samples. + Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"` + // metadata represents the metadata associated with the given series' samples. + Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + // + // Note that some receivers might require this and in turn fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp.
+ // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // A zero value means the value is not set. If you need to use exactly the zero value for + // the timestamp, use 1 millisecond before or after. + CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{1} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *TimeSeries) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + +func (m *TimeSeries) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *TimeSeries) GetMetadata() Metadata { + if m != nil { + return m.Metadata + } + return Metadata{} +} + +func (m *TimeSeries) GetCreatedTimestamp() int64 { + if m != nil { + return m.CreatedTimestamp + } + return 0 +} + +// Exemplar is additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +type Exemplar struct { + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of 2, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace, it should use the `trace_id` label name, as a best practice. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents the timestamp of the exemplar in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp.
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{2} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// Sample represents series sample. +type Sample struct { + // value of the sample. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{3} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// Metadata represents the metadata associated with the given series' samples. +type Metadata struct { + Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"` + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. 
Help is optional; the reference should point to an empty string in + // such a case. + HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"` + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional; the reference should point to an empty string in + // such a case. + UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{4} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return m.Size() +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetType() Metadata_MetricType { + if m != nil { + return m.Type + } + return Metadata_METRIC_TYPE_UNSPECIFIED +} + +func (m *Metadata) GetHelpRef() uint32 { + if m != nil { + return m.HelpRef + } + return 0 +} + +func (m *Metadata) GetUnitRef() uint32 { + if m != nil { + return m.UnitRef + } + return 0 +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both: the usual +// integer histogram as well as a float histogram. +type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -53 and numbers in the range of -4 <= n <= 8. More valid numbers might be + // added in the future for new bucketing layouts. + // + // A schema equal to -53 means custom buckets. See the + // custom_values field description for more details. + // + // Values between -4 and 8 represent a base-2 bucket schema, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n (n is the schema value) logarithmic buckets. Or in other words, + // each bucket boundary is the previous boundary times 2^(2^-n). + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets.
+ NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + // + // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows: + // * The span offset+length points to the index of the custom_values array + // or +Inf if pointing to the length of the array. + // * The counts and deltas have the same meaning as for exponential histograms. + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=io.prometheus.write.v2.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // custom_values is an additional field used by non-exponential bucketing layouts. + // + // For custom buckets (-53 schema value) custom_values specify monotonically + // increasing upper inclusive boundaries for the bucket counts with arbitrary + // widths for this histogram. In other words, custom_values represents custom, + // explicit bucketing that could have been converted from the classic histograms. + // + // Those bounds are then referenced by spans in positive_spans with corresponding positive + // counts or deltas (refer to positive_spans for more details). This way we can + // encode sparse histograms with custom bucketing (many buckets are often + // not used). + // + // Note that for custom bounds, even negative observations are placed in the positive + // counts to simplify the implementation and avoid ambiguity of where to place + // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and + // the zero bucket are unused, if the schema indicates custom bucketing. + // + // For each upper boundary the previous boundary represents the lower exclusive + // boundary for that bucket. The first element is the upper inclusive boundary + // for the first bucket, which implicitly has a lower inclusive bound of -Inf. + // This is similar to "le" label semantics on classic histograms. You may add a + // bucket with an upper bound of 0 to make sure that you really have no negative + // observations, but in practice, native histogram rendering will show both with + // or without a first upper boundary of 0 and no negative counts as the same case.
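+ // + // For example, custom_values [0.1, 0.5, 1, 5] describes four buckets with + // upper inclusive bounds 0.1, 0.5, 1 and 5 (each lower bound being the + // previous value, the first starting at -Inf), plus an implicit (5, +Inf] bucket.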
+ // + // The last element is not only the upper inclusive bound of the last regular + // bucket, but implicitly the lower exclusive bound of the +Inf bucket. + CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{5} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := 
m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_RESET_HINT_UNSPECIFIED +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Histogram) GetCustomValues() []float64 { + if m != nil { + return m.CustomValues + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). 
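+// For example, the spans [{offset: 0, length: 2}, {offset: 3, length: 1}] +// together with three bucket counts describe buckets at indices 0 and 1 and, +// after a gap of three unused indices, a single bucket at index 5.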
+type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{6} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func init() { + proto.RegisterEnum("io.prometheus.write.v2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value) + proto.RegisterEnum("io.prometheus.write.v2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) + proto.RegisterType((*Request)(nil), "io.prometheus.write.v2.Request") + proto.RegisterType((*TimeSeries)(nil), "io.prometheus.write.v2.TimeSeries") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.write.v2.Exemplar") + proto.RegisterType((*Sample)(nil), "io.prometheus.write.v2.Sample") + proto.RegisterType((*Metadata)(nil), "io.prometheus.write.v2.Metadata") + proto.RegisterType((*Histogram)(nil), "io.prometheus.write.v2.Histogram") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.write.v2.BucketSpan") +} + +func init() { + proto.RegisterFile("io/prometheus/write/v2/types.proto", fileDescriptor_f139519efd9fa8d7) +} + +var fileDescriptor_f139519efd9fa8d7 = []byte{ + // 926 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xed, 0xc4, 0x69, 0x3e, 0x6e, 0x9a, 0xac, 0x33, 0xb4, 0x5d, 0x6f, 0x81, 0x6c, 0xd6, 0x08, + 0x88, 0x58, 0x29, 0x91, 0xc2, 0xeb, 0x0a, 0xd4, 0xb4, 0x6e, 0x93, 0x95, 0x92, 0xac, 0x26, 0x2e, + 0x52, 0x79, 0xb1, 0xdc, 0x64, 0x92, 0x58, 0xd8, 0xb1, 0xf1, 0x4c, 0x02, 0xe5, 0xf7, 0xf1, 0xb0, + 0x8f, 0xfc, 0x01, 0x10, 0xf4, 0x9d, 0xff, 0x80, 0x66, 0xfc, 0xd9, 0x42, 0xbb, 0xe2, 0x6d, 0xe6, + 0xdc, 0x73, 0xee, 0x3d, 0xb9, 0xbe, 0x77, 0x02, 0xba, 0xe3, 0xf7, 0x82, 0xd0, 0xf7, 0x28, 0x5f, + 0xd3, 0x2d, 0xeb, 0xfd, 0x14, 0x3a, 0x9c, 0xf6, 0x76, 0xfd, 0x1e, 0xbf, 0x0d, 0x28, 0xeb, 0x06, + 0xa1, 0xcf, 0x7d, 0x7c, 0xec, 0xf8, 0xdd, 0x8c, 0xd3, 0x95, 0x9c, 0xee, 0xae, 0x7f, 0x72, 0xb8, + 0xf2, 0x57, 0xbe, 0xa4, 0xf4, 0xc4, 0x29, 0x62, 0xeb, 0x0c, 0xca, 0x84, 0xfe, 0xb8, 0xa5, 0x8c, + 0x63, 0x0d, 0xca, 0xec, 0xd6, 0xbb, 0xf1, 0x5d, 0xa6, 0x15, 0xdb, 0x4a, 0xa7, 0x4a, 0x92, 0x2b, + 0x1e, 0x02, 0x70, 0xc7, 0xa3, 0x8c, 0x86, 0x0e, 0x65, 0xda, 0x7e, 0x5b, 0xe9, 0xd4, 0xfa, 0x7a, + 0xf7, 
0xbf, 0xeb, 0x74, 0x4d, 0xc7, 0xa3, 0x33, 0xc9, 0x1c, 0x14, 0xdf, 0xff, 0xf1, 0x72, 0x8f, + 0xe4, 0xb4, 0x6f, 0x8b, 0x15, 0xa4, 0x16, 0xf5, 0xbf, 0x0b, 0x00, 0x19, 0x0d, 0xbf, 0x84, 0x9a, + 0x6b, 0xdf, 0x50, 0x97, 0x59, 0x21, 0x5d, 0x32, 0x0d, 0xb5, 0x95, 0x4e, 0x9d, 0x40, 0x04, 0x11, + 0xba, 0x64, 0xf8, 0x1b, 0x28, 0x33, 0xdb, 0x0b, 0x5c, 0xca, 0xb4, 0x82, 0x2c, 0xde, 0x7a, 0xac, + 0xf8, 0x4c, 0xd2, 0xe2, 0xc2, 0x89, 0x08, 0x5f, 0x02, 0xac, 0x1d, 0xc6, 0xfd, 0x55, 0x68, 0x7b, + 0x4c, 0x53, 0x64, 0x8a, 0x57, 0x8f, 0xa5, 0x18, 0x26, 0xcc, 0xc4, 0x7e, 0x26, 0xc5, 0xe7, 0x50, + 0xa5, 0x3f, 0x53, 0x2f, 0x70, 0xed, 0x30, 0x6a, 0x52, 0xad, 0xdf, 0x7e, 0x2c, 0x8f, 0x11, 0x13, + 0xe3, 0x34, 0x99, 0x10, 0x0f, 0xa0, 0xe2, 0x51, 0x6e, 0x2f, 0x6c, 0x6e, 0x6b, 0xfb, 0x6d, 0xf4, + 0x54, 0x92, 0x71, 0xcc, 0x8b, 0x93, 0xa4, 0x3a, 0xfc, 0x1a, 0x9a, 0xf3, 0x90, 0xda, 0x9c, 0x2e, + 0x2c, 0xd9, 0x5e, 0x6e, 0x7b, 0x81, 0x56, 0x6a, 0xa3, 0x8e, 0x42, 0xd4, 0x38, 0x60, 0x26, 0xb8, + 0x6e, 0x41, 0x25, 0x71, 0xf3, 0xe1, 0x66, 0x1f, 0xc2, 0xfe, 0xce, 0x76, 0xb7, 0x54, 0x2b, 0xb4, + 0x51, 0x07, 0x91, 0xe8, 0x82, 0x3f, 0x81, 0x6a, 0x56, 0x47, 0x91, 0x75, 0x32, 0x40, 0x7f, 0x03, + 0xa5, 0xa8, 0xf3, 0x99, 0x1a, 0x3d, 0xaa, 0x2e, 0x3c, 0x54, 0xff, 0x55, 0x80, 0x4a, 0xf2, 0x43, + 0xf1, 0xb7, 0x50, 0x14, 0xd3, 0x2c, 0xf5, 0x8d, 0xfe, 0xeb, 0x0f, 0x35, 0x46, 0x1c, 0x42, 0x67, + 0x6e, 0xde, 0x06, 0x94, 0x48, 0x21, 0x7e, 0x01, 0x95, 0x35, 0x75, 0x03, 0xf1, 0xf3, 0xa4, 0xd1, + 0x3a, 0x29, 0x8b, 0x3b, 0xa1, 0x4b, 0x11, 0xda, 0x6e, 0x1c, 0x2e, 0x43, 0xc5, 0x28, 0x24, 0xee, + 0x84, 0x2e, 0xf5, 0xdf, 0x11, 0x40, 0x96, 0x0a, 0x7f, 0x0c, 0xcf, 0xc7, 0x86, 0x49, 0x46, 0x67, + 0x96, 0x79, 0xfd, 0xce, 0xb0, 0xae, 0x26, 0xb3, 0x77, 0xc6, 0xd9, 0xe8, 0x62, 0x64, 0x9c, 0xab, + 0x7b, 0xf8, 0x39, 0x7c, 0x94, 0x0f, 0x9e, 0x4d, 0xaf, 0x26, 0xa6, 0x41, 0x54, 0x84, 0x8f, 0xa0, + 0x99, 0x0f, 0x5c, 0x9e, 0x5e, 0x5d, 0x1a, 0x6a, 0x01, 0xbf, 0x80, 0xa3, 0x3c, 0x3c, 0x1c, 0xcd, + 0xcc, 0xe9, 0x25, 0x39, 0x1d, 0xab, 0x0a, 0x6e, 0xc1, 0xc9, 0xbf, 0x14, 0x59, 0xbc, 0xf8, 0xb0, + 0xd4, 0xec, 0x6a, 0x3c, 0x3e, 0x25, 0xd7, 0xea, 0x3e, 0x3e, 0x04, 0x35, 0x1f, 0x18, 0x4d, 0x2e, + 0xa6, 0x6a, 0x09, 0x6b, 0x70, 0x78, 0x8f, 0x6e, 0x9e, 0x9a, 0xc6, 0xcc, 0x30, 0xd5, 0xb2, 0xfe, + 0x6b, 0x09, 0xaa, 0xe9, 0x64, 0xe3, 0x4f, 0xa1, 0x3a, 0xf7, 0xb7, 0x1b, 0x6e, 0x39, 0x1b, 0x2e, + 0x3b, 0x5d, 0x1c, 0xee, 0x91, 0x8a, 0x84, 0x46, 0x1b, 0x8e, 0x5f, 0x41, 0x2d, 0x0a, 0x2f, 0x5d, + 0xdf, 0xe6, 0xd1, 0x20, 0x0c, 0xf7, 0x08, 0x48, 0xf0, 0x42, 0x60, 0x58, 0x05, 0x85, 0x6d, 0x3d, + 0xd9, 0x60, 0x44, 0xc4, 0x11, 0x1f, 0x43, 0x89, 0xcd, 0xd7, 0xd4, 0xb3, 0x65, 0x6b, 0x9b, 0x24, + 0xbe, 0xe1, 0xcf, 0xa1, 0xf1, 0x0b, 0x0d, 0x7d, 0x8b, 0xaf, 0x43, 0xca, 0xd6, 0xbe, 0xbb, 0x90, + 0x33, 0x8f, 0x48, 0x5d, 0xa0, 0x66, 0x02, 0xe2, 0x2f, 0x62, 0x5a, 0xe6, 0xab, 0x24, 0x7d, 0x21, + 0x72, 0x20, 0xf0, 0xb3, 0xc4, 0xdb, 0x57, 0xa0, 0xe6, 0x78, 0x91, 0xc1, 0xb2, 0x34, 0x88, 0x48, + 0x23, 0x65, 0x46, 0x26, 0xa7, 0xd0, 0xd8, 0xd0, 0x95, 0xcd, 0x9d, 0x1d, 0xb5, 0x58, 0x60, 0x6f, + 0x98, 0x56, 0x79, 0xfa, 0xed, 0x1a, 0x6c, 0xe7, 0x3f, 0x50, 0x3e, 0x0b, 0xec, 0x4d, 0xbc, 0x70, + 0xf5, 0x44, 0x2f, 0x30, 0x86, 0xbf, 0x84, 0x67, 0x69, 0xc2, 0x05, 0x75, 0xb9, 0xcd, 0xb4, 0x6a, + 0x5b, 0xe9, 0x60, 0x92, 0xd6, 0x39, 0x97, 0xe8, 0x3d, 0xa2, 0x74, 0xca, 0x34, 0x68, 0x2b, 0x1d, + 0x94, 0x11, 0xa5, 0x4d, 0x26, 0x2c, 0x06, 0x3e, 0x73, 0x72, 0x16, 0x6b, 0xff, 0xd7, 0x62, 0xa2, + 0x4f, 0x2d, 0xa6, 0x09, 0x63, 0x8b, 0x07, 0x91, 0xc5, 0x04, 0xce, 0x2c, 0xa6, 0xc4, 0xd8, 0x62, + 0x3d, 0xb2, 0x98, 0xc0, 0xb1, 
0xc5, 0xb7, 0x00, 0x21, 0x65, 0x94, 0x5b, 0x6b, 0xf1, 0x55, 0x1a, + 0x4f, 0xef, 0x65, 0x3a, 0x63, 0x5d, 0x22, 0x34, 0x43, 0x67, 0xc3, 0x49, 0x35, 0x4c, 0x8e, 0xf7, + 0x1f, 0x82, 0x67, 0x0f, 0x1e, 0x02, 0xfc, 0x19, 0xd4, 0xe7, 0x5b, 0xc6, 0x7d, 0xcf, 0x92, 0xcf, + 0x06, 0xd3, 0x54, 0x69, 0xe8, 0x20, 0x02, 0xbf, 0x93, 0x98, 0xbe, 0x80, 0x6a, 0x9a, 0x1a, 0x9f, + 0xc0, 0x31, 0x11, 0x13, 0x6e, 0x0d, 0x47, 0x13, 0xf3, 0xc1, 0x9a, 0x62, 0x68, 0xe4, 0x62, 0xd7, + 0xc6, 0x4c, 0x45, 0xb8, 0x09, 0xf5, 0x1c, 0x36, 0x99, 0xaa, 0x05, 0xb1, 0x49, 0x39, 0x28, 0xda, + 0x59, 0x65, 0x50, 0x86, 0x7d, 0xd9, 0x94, 0xc1, 0x01, 0x40, 0x36, 0x6f, 0xfa, 0x1b, 0x80, 0xec, + 0x03, 0x88, 0x91, 0xf7, 0x97, 0x4b, 0x46, 0xa3, 0x1d, 0x6a, 0x92, 0xf8, 0x26, 0x70, 0x97, 0x6e, + 0x56, 0x7c, 0x2d, 0x57, 0xa7, 0x4e, 0xe2, 0xdb, 0xe0, 0xe8, 0xfd, 0x5d, 0x0b, 0xfd, 0x76, 0xd7, + 0x42, 0x7f, 0xde, 0xb5, 0xd0, 0xf7, 0x65, 0xd9, 0xb4, 0x5d, 0xff, 0xa6, 0x24, 0xff, 0x8a, 0xbf, + 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xfc, 0x93, 0x1c, 0xde, 0x07, 0x00, 0x00, +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.LabelsRefs) > 0 { + dAtA3 := make([]byte, len(m.LabelsRefs)*10) + var j2 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA3[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.LabelsRefs) > 0 { + dAtA5 := make([]byte, len(m.LabelsRefs)*10) + var j4 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintTypes(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.UnitRef != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.UnitRef)) + i-- + dAtA[i] = 0x20 + } + if m.HelpRef != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.HelpRef)) + 
i-- + dAtA[i] = 0x18 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f7 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f7)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j8 int + dAtA10 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x9 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x9 >= 1<<7 { + dAtA10[j8] = uint8(uint64(x9)&0x7f | 0x80) + j8++ + x9 >>= 7 + } + dAtA10[j8] = uint8(x9) + j8++ + } + i -= j8 + copy(dAtA[i:], dAtA10[:j8]) + i = encodeVarintTypes(dAtA, i, uint64(j8)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f11 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j12 int + dAtA14 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x13 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x13 >= 1<<7 { + dAtA14[j12] = uint8(uint64(x13)&0x7f | 0x80) + j12++ + x13 >>= 7 + } + dAtA14[j12] = uint8(x13) + j12++ + } + i -= j12 + copy(dAtA[i:], dAtA14[:j12]) + i = encodeVarintTypes(dAtA, i, uint64(j12)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if 
m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Symbols) > 0 { + for _, s := range m.Symbols { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + 
n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.Metadata.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.CreatedTimestamp != 0 { + n += 1 + sovTypes(uint64(m.CreatedTimestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.HelpRef != 0 { + n += 1 + sovTypes(uint64(m.HelpRef)) + } + if m.UnitRef != 0 { + n += 1 + sovTypes(uint64(m.UnitRef)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if len(m.CustomValues) > 0 { + n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = 
append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Metadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) + } + m.HelpRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HelpRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType) + } + m.UnitRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.UnitRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, 
elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto new file mode 100644 index 0000000000..ff6c4936bb --- /dev/null +++ b/prompb/io/prometheus/write/v2/types.proto @@ -0,0 +1,255 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2 + +syntax = "proto3"; +package io.prometheus.write.v2; + +option go_package = "writev2"; + +import "gogoproto/gogo.proto"; + +// Request represents a request to write the given timeseries to a remote destination. +// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// NOTE: gogoproto options might change in future for this file, they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). 
See: https://github.com/prometheus/prometheus/issues/11908 +message Request { + // Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the top-down message + // for the deterministic interop between those two, see types_test.go for details. + // Generally it's not needed, because Receivers must use the Content-Type header, but we want to + // be sympathetic to adopters with mistaken implementations and have a deterministic error (empty + // message if you use the wrong proto schema). + reserved 1 to 3; + + // symbols contains a de-duplicated array of string elements used for various + // items in a Request message, like labels and metadata items. For the sender's convenience + // around empty values for optional fields like unit_ref, the symbols array MUST start with + // an empty string. + // + // To decode each of the symbolized strings, referenced by a "ref(s)" suffix, you + // need to look up the actual string by index in the symbols array. The order of + // strings is up to the sender. The receiver should not assume any particular encoding. + repeated string symbols = 4; + // timeseries represents an array of distinct series with 0 or more samples. + repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false]; +} + +// TimeSeries represents a single series. +message TimeSeries { + // labels_refs is a list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // + // Note that there might be multiple TimeSeries objects in the same + // Request with the same labels, e.g. for different exemplars, metadata + // or created timestamp. + repeated uint32 labels_refs = 1; + + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 3 [(gogoproto.nullable) = false]; + + // exemplars represents an optional set of exemplars attached to this series' samples. + repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false]; + + // metadata represents the metadata associated with the given series' samples. + Metadata metadata = 5 [(gogoproto.nullable) = false]; + + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + // + // Note that some receivers might require this and in return fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + int64 created_timestamp = 6; +} +
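+// For illustration only (a hypothetical encoding, not part of the spec text): a +// Request carrying a single series {__name__="up", job="a"} could set +// symbols = ["", "__name__", "up", "job", "a"] and that series' +// labels_refs = [1, 2, 3, 4], pairing each name index with its value index. +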
+// Exemplar is additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +message Exemplar { + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace, it should use the `trace_id` label name, as a best practice. + repeated uint32 labels_refs = 1; + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + double value = 2; + // timestamp represents the timestamp of the exemplar in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + int64 timestamp = 3; +} + +// Sample represents a series sample. +message Sample { + // value of the sample. + double value = 1; + // timestamp represents the timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + int64 timestamp = 2; +} + +// Metadata represents the metadata associated with the given series' samples. +message Metadata { + enum MetricType { + METRIC_TYPE_UNSPECIFIED = 0; + METRIC_TYPE_COUNTER = 1; + METRIC_TYPE_GAUGE = 2; + METRIC_TYPE_HISTOGRAM = 3; + METRIC_TYPE_GAUGEHISTOGRAM = 4; + METRIC_TYPE_SUMMARY = 5; + METRIC_TYPE_INFO = 6; + METRIC_TYPE_STATESET = 7; + } + MetricType type = 1; + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional; the reference should point to an empty string in + // such a case. + uint32 help_ref = 3; + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional; the reference should point to an empty string in + // such a case. + uint32 unit_ref = 4; +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both: the usual +// integer histogram as well as a float histogram. +message Histogram { + enum ResetHint { + RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly. + RESET_HINT_YES = 1; // This is the 1st histogram after a counter reset. + RESET_HINT_NO = 2; // There was no counter reset between this and the previous Histogram. + RESET_HINT_GAUGE = 3; // This is a gauge histogram where counter resets don't happen. + } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + + // The schema defines the bucket schema. Currently, valid numbers + // are -53 and numbers in the range -4 <= n <= 8. More valid numbers might be + // added in the future for new bucketing layouts. + // + // A schema equal to -53 means custom buckets. See the + // custom_values field description for more details. + // + // Values between -4 and 8 represent a base-2 bucket schema, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n (n is the schema value) logarithmic buckets. Or in other words, + // each bucket boundary is the previous boundary times 2^(2^-n).
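+ // + // For example (illustrative only): with schema n=3, each power of two is + // split into 2^3 = 8 buckets, so each bucket boundary is the previous one + // times 2^(1/8), roughly 1.0905.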
+ sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + // + // In case of custom buckets (-53 schema value), the positive buckets are interpreted as follows: + // * The span offset+length points to the index of the custom_values array, + // or +Inf if pointing to the length of the array. + // * The counts and deltas have the same meaning as for exponential histograms. + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp represents the timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + int64 timestamp = 15; + + // custom_values is an additional field used by non-exponential bucketing layouts. + // + // For custom buckets (-53 schema value), custom_values specify monotonically + // increasing upper inclusive boundaries for the bucket counts with arbitrary + // widths for this histogram. In other words, custom_values represents custom, + // explicit bucketing that could have been converted from the classic histograms. + // + // Those bounds are then referenced by spans in positive_spans with corresponding positive + // counts or deltas (refer to positive_spans for more details). This way we can + // encode sparse histograms with custom bucketing (many buckets are often + // not used). + // + // Note that for custom bounds, even negative observations are placed in the positive + // counts to simplify the implementation and avoid ambiguity of where to place + // an underflow bucket, e.g. (-2, 1]. Therefore, negative buckets and + // the zero bucket are unused if the schema indicates custom bucketing. + // + // For each upper boundary, the previous boundary represents the lower exclusive + // boundary for that bucket. The first element is the upper inclusive boundary + // for the first bucket, which implicitly has a lower inclusive bound of -Inf. + // This is similar to "le" label semantics on classic histograms. You may add a + // bucket with an upper bound of 0 to make sure that you really have no negative + // observations, but in practice, native histogram rendering will show the cases with + // and without a first upper boundary of 0 (and no negative counts) as the same. + // + // The last element is not only the upper inclusive bound of the last regular + // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
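+ // + // For illustration only (hypothetical values): a histogram converted from + // classic buckets le={1, 5, 10} with counts {3, 0, 2, 1} (the last one being + // the implicit +Inf bucket) could be encoded as schema: -53, + // custom_values: [1, 5, 10], positive_spans: [{offset: 0, length: 4}] and + // positive_deltas: [3, -3, 2, -1], where the span's last index (3) equals + // the length of custom_values and therefore denotes the +Inf bucket.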
+ repeated double custom_values = 16; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} diff --git a/prompb/io/prometheus/write/v2/types_test.go b/prompb/io/prometheus/write/v2/types_test.go new file mode 100644 index 0000000000..5b7622fc2f --- /dev/null +++ b/prompb/io/prometheus/write/v2/types_test.go @@ -0,0 +1,97 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/prompb" +) + +func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) { + expectedV1Empty := &prompb.WriteRequest{} + for _, tc := range []struct{ incoming *Request }{ + { + incoming: &Request{}, // Technically wrong: there should be at least an empty string in symbols. + }, + { + incoming: &Request{ + Symbols: []string{""}, + }, // NOTE: Without reserved fields, failed with "corrupted" ghost TimeSeries element. + }, + { + incoming: &Request{ + Symbols: []string{"", "__name__", "metric1"}, + Timeseries: []TimeSeries{ + {LabelsRefs: []uint32{1, 2}}, + {Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}}, + }, // NOTE: Without reserved fields, proto: illegal wireType 7 + }, + }, + } { + t.Run("", func(t *testing.T) { + in, err := proto.Marshal(tc.incoming) + require.NoError(t, err) + + // Test accidental unmarshal of v2 payload with v1 proto. + out := &prompb.WriteRequest{} + require.NoError(t, proto.Unmarshal(in, out)) + + // Drop unknowns; we expect them when the incoming payload had some fields. + // This field & method will likely be gone after gogo removal. + out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables. + + require.Equal(t, expectedV1Empty, out) + }) + } +} + +func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) { + expectedV2Empty := &Request{} + for _, tc := range []struct{ incoming *prompb.WriteRequest }{ + { + incoming: &prompb.WriteRequest{}, + }, + { + incoming: &prompb.WriteRequest{ + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}}, + Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}, + }, + }, + }, + // NOTE: Without reserved fields, results in corrupted v2.Request.Symbols. + }, + } { + t.Run("", func(t *testing.T) { + in, err := proto.Marshal(tc.incoming) + require.NoError(t, err) + + // Test accidental unmarshal of v1 payload with v2 proto.
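+ // (This is deterministic because Request reserves field numbers 1 to 3, + // which prompb.WriteRequest uses for its own fields, so a v1 payload is + // treated as unknown fields by the v2 proto and lands in XXX_unrecognized + // rather than being misparsed.)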
+ out := &Request{} + require.NoError(t, proto.Unmarshal(in, out)) + + // Drop unknowns; we expect them when the incoming payload had some fields. + // This field & method will likely be gone after gogo removal. + out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables. + + require.Equal(t, expectedV2Empty, out) + }) + } +} diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go new file mode 100644 index 0000000000..2ab95e0d19 --- /dev/null +++ b/prompb/rwcommon/codec_test.go @@ -0,0 +1,298 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rwcommon + +import ( + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" +) + +func TestToLabels(t *testing.T) { + expected := labels.FromStrings("__name__", "metric1", "foo", "bar") + + t.Run("v1", func(t *testing.T) { + ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}} + b := labels.NewScratchBuilder(2) + require.Equal(t, expected, ts.ToLabels(&b, nil)) + require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil)) + require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels)) + }) + t.Run("v2", func(t *testing.T) { + v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"} + ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}} + b := labels.NewScratchBuilder(2) + require.Equal(t, expected, ts.ToLabels(&b, v2Symbols)) + // No need for FromLabels in our prod code as we use a symbol table to do so.
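+ // In other words, LabelsRefs are name/value index pairs into the symbols + // slice: symbols[1]="__name__" pairs with symbols[2]="metric1", and + // symbols[3]="foo" pairs with symbols[4]="bar".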
+ }) +} + +func TestFromMetadataType(t *testing.T) { + for _, tc := range []struct { + desc string + input model.MetricType + expectedV1 prompb.MetricMetadata_MetricType + expectedV2 writev2.Metadata_MetricType + }{ + { + desc: "with a single-word metric", + input: model.MetricTypeCounter, + expectedV1: prompb.MetricMetadata_COUNTER, + expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER, + }, + { + desc: "with a two-word metric", + input: model.MetricTypeStateset, + expectedV1: prompb.MetricMetadata_STATESET, + expectedV2: writev2.Metadata_METRIC_TYPE_STATESET, + }, + { + desc: "with an unknown metric", + input: "not-known", + expectedV1: prompb.MetricMetadata_UNKNOWN, + expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + t.Run("v1", func(t *testing.T) { + require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input)) + }) + t.Run("v2", func(t *testing.T) { + require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input)) + }) + }) + } +} + +func TestToMetadata(t *testing.T) { + sym := writev2.NewSymbolTable() + + for _, tc := range []struct { + input writev2.Metadata + expected metadata.Metadata + }{ + { + input: writev2.Metadata{}, + expected: metadata.Metadata{ + Type: model.MetricTypeUnknown, + }, + }, + { + input: writev2.Metadata{ + Type: 12414, // Unknown. + }, + expected: metadata.Metadata{ + Type: model.MetricTypeUnknown, + }, + }, + { + input: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: sym.Symbolize("help1"), + UnitRef: sym.Symbolize("unit1"), + }, + expected: metadata.Metadata{ + Type: model.MetricTypeCounter, + Help: "help1", + Unit: "unit1", + }, + }, + { + input: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_STATESET, + HelpRef: sym.Symbolize("help2"), + }, + expected: metadata.Metadata{ + Type: model.MetricTypeStateset, + Help: "help2", + }, + }, + } { + t.Run("", func(t *testing.T) { + ts := writev2.TimeSeries{Metadata: tc.input} + require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols())) + }) + } +} + +func TestToHistogram_Empty(t *testing.T) { + t.Run("v1", func(t *testing.T) { + require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "") + require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "") + }) + t.Run("v2", func(t *testing.T) { + require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "") + require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "") + }) +} + +// NOTE(bwplotka): This is technically not a valid histogram, but it represents +// important cases to test when copying or converting to/from int/float histograms. +func testIntHistogram() histogram.Histogram { + return histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + Count: 19, + Sum: 2.7, + ZeroThreshold: 1e-128, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + CustomValues: []float64{21421, 523}, + } +} + +// NOTE(bwplotka): This is technically not a valid histogram, but it represents +// important cases to test when copying or converting to/from int/float histograms. 
+func testFloatHistogram() histogram.FloatHistogram { + return histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + Count: 19, + Sum: 2.7, + ZeroThreshold: 1e-128, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []float64{1, 3, 1, 2, 1, 1}, + CustomValues: []float64{21421, 523}, + } +} + +func TestFromIntToFloatOrIntHistogram(t *testing.T) { + t.Run("v1", func(t *testing.T) { + // v1 does not support nhcb. + testIntHistWithoutNHCB := testIntHistogram() + testIntHistWithoutNHCB.CustomValues = nil + testFloatHistWithoutNHCB := testFloatHistogram() + testFloatHistWithoutNHCB.CustomValues = nil + + h := prompb.FromIntHistogram(123, &testIntHistWithoutNHCB) + require.False(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Equal(t, testIntHistWithoutNHCB, *h.ToIntHistogram()) + require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram()) + }) + t.Run("v2", func(t *testing.T) { + testIntHist := testIntHistogram() + testFloatHist := testFloatHistogram() + + h := writev2.FromIntHistogram(123, &testIntHist) + require.False(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Equal(t, testIntHist, *h.ToIntHistogram()) + require.Equal(t, testFloatHist, *h.ToFloatHistogram()) + }) +} + +func TestFromFloatToFloatHistogram(t *testing.T) { + t.Run("v1", func(t *testing.T) { + // v1 does not support nhcb. + testFloatHistWithoutNHCB := testFloatHistogram() + testFloatHistWithoutNHCB.CustomValues = nil + + h := prompb.FromFloatHistogram(123, &testFloatHistWithoutNHCB) + require.True(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Nil(t, h.ToIntHistogram()) + require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram()) + }) + t.Run("v2", func(t *testing.T) { + testFloatHist := testFloatHistogram() + + h := writev2.FromFloatHistogram(123, &testFloatHist) + require.True(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Nil(t, h.ToIntHistogram()) + require.Equal(t, testFloatHist, *h.ToFloatHistogram()) + }) +} + +func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) { + for _, tc := range []struct { + input histogram.CounterResetHint + expectedV1 prompb.Histogram_ResetHint + expectedV2 writev2.Histogram_ResetHint + }{ + { + input: histogram.UnknownCounterReset, + expectedV1: prompb.Histogram_UNKNOWN, + expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED, + }, + { + input: histogram.CounterReset, + expectedV1: prompb.Histogram_YES, + expectedV2: writev2.Histogram_RESET_HINT_YES, + }, + { + input: histogram.NotCounterReset, + expectedV1: prompb.Histogram_NO, + expectedV2: writev2.Histogram_RESET_HINT_NO, + }, + { + input: histogram.GaugeType, + expectedV1: prompb.Histogram_GAUGE, + expectedV2: writev2.Histogram_RESET_HINT_GAUGE, + }, + } { + t.Run("", func(t *testing.T) { + t.Run("v1", func(t *testing.T) { + h := testIntHistogram() + h.CounterResetHint = tc.input + got := prompb.FromIntHistogram(1337, &h) + require.Equal(t, tc.expectedV1, got.GetResetHint()) + + fh := testFloatHistogram() + fh.CounterResetHint = tc.input + got2 := prompb.FromFloatHistogram(1337, &fh) + require.Equal(t, tc.expectedV1, got2.GetResetHint()) + }) + t.Run("v2", func(t *testing.T) { + h := testIntHistogram() + h.CounterResetHint 
= tc.input + got := writev2.FromIntHistogram(1337, &h) + require.Equal(t, tc.expectedV2, got.GetResetHint()) + + fh := testFloatHistogram() + fh.CounterResetHint = tc.input + got2 := writev2.FromFloatHistogram(1337, &fh) + require.Equal(t, tc.expectedV2, got2.GetResetHint()) + }) + }) + } +} diff --git a/prompb/types.pb.go b/prompb/types.pb.go index 125f868e97..93883daa13 100644 --- a/prompb/types.pb.go +++ b/prompb/types.pb.go @@ -164,7 +164,7 @@ func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { type MetricMetadata struct { // Represents the metric type, these match the set from Prometheus. - // Refer to model/textparse/interface.go for details. + // Refer to github.com/prometheus/common/model/metadata.go for details. Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"` MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` diff --git a/prompb/types.proto b/prompb/types.proto index aa322515c3..61fc1e0143 100644 --- a/prompb/types.proto +++ b/prompb/types.proto @@ -31,7 +31,7 @@ message MetricMetadata { } // Represents the metric type, these match the set from Prometheus. - // Refer to model/textparse/interface.go for details. + // Refer to github.com/prometheus/common/model/metadata.go for details. MetricType type = 1; string metric_family_name = 2; string help = 4; diff --git a/promql/bench_test.go b/promql/bench_test.go index c6a528f7bf..cd6d1190ca 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package promql +package promql_test import ( "context" @@ -21,14 +21,24 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" ) -func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error { +func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, interval, numIntervals int) error { + ctx := context.Background() + metrics := []labels.Labels{} + // Generating test series: a_X, b_X, and h_X, where X can take values of one, ten, or hundred, + // representing the number of series each metric name contains. + // Metric a_X and b_X are simple metrics where h_X is a histogram. 
+ // These metrics will have data for the entire test time range. metrics = append(metrics, labels.FromStrings("__name__", "a_one")) metrics = append(metrics, labels.FromStrings("__name__", "b_one")) for j := 0; j < 10; j++ { @@ -55,6 +65,9 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, } refs := make([]storage.SeriesRef, len(metrics)) + // Number of points for each different label value of "l" for the sparse series + pointsPerSparseSeries := numIntervals / 50 + for s := 0; s < numIntervals; s++ { a := stor.Appender(context.Background()) ts := int64(s * interval) @@ -62,10 +75,20 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, ref, _ := a.Append(refs[i], metric, ts, float64(s)+float64(i)/float64(len(metrics))) refs[i] = ref } + // Generating a sparse time series: each label value of "l" will contain data only for + // pointsPerSparseSeries points + metric := labels.FromStrings("__name__", "sparse", "l", strconv.Itoa(s/pointsPerSparseSeries)) + _, err := a.Append(0, metric, ts, float64(s)/float64(len(metrics))) + if err != nil { + return err + } if err := a.Commit(); err != nil { return err } } + + stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series. + stor.DB.Compact(ctx) return nil } @@ -88,9 +111,13 @@ func rangeQueryCases() []benchCase { expr: "rate(a_X[1m])", steps: 10000, }, + { + expr: "rate(sparse[1m])", + steps: 10000, + }, // Holt-Winters and long ranges. { - expr: "holt_winters(a_X[1d], 0.3, 0.3)", + expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)", }, { expr: "changes(a_X[1d])", @@ -139,6 +166,9 @@ func rangeQueryCases() []benchCase { { expr: "sum(a_X)", }, + { + expr: "avg(a_X)", + }, { expr: "sum without (l)(h_X)", }, @@ -152,7 +182,8 @@ func rangeQueryCases() []benchCase { expr: "sum by (le)(h_X)", }, { - expr: "count_values('value', h_X)", + expr: "count_values('value', h_X)", + steps: 100, }, { expr: "topk(1, a_X)", @@ -160,6 +191,21 @@ func rangeQueryCases() []benchCase { { expr: "topk(5, a_X)", }, + { + expr: "limitk(1, a_X)", + }, + { + expr: "limitk(5, a_X)", + }, + { + expr: "limit_ratio(0.1, a_X)", + }, + { + expr: "limit_ratio(0.5, a_X)", + }, + { + expr: "limit_ratio(-0.5, a_X)", + }, // Combinations. { expr: "rate(a_X[1m]) + rate(b_X[1m])", @@ -212,7 +258,6 @@ func rangeQueryCases() []benchCase { tmp = append(tmp, c) } else { tmp = append(tmp, benchCase{expr: c.expr, steps: 1}) - tmp = append(tmp, benchCase{expr: c.expr, steps: 10}) tmp = append(tmp, benchCase{expr: c.expr, steps: 100}) tmp = append(tmp, benchCase{expr: c.expr, steps: 1000}) } @@ -222,14 +267,15 @@ func rangeQueryCases() []benchCase { func BenchmarkRangeQuery(b *testing.B) { stor := teststorage.New(b) + stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings. defer stor.Close() - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 50000000, Timeout: 100 * time.Second, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(b, opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps.
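The sparse-series bucketing above is easiest to verify in isolation. The following standalone sketch (not part of the patch; the shortened numIntervals is chosen only for illustration) shows how the integer division s / pointsPerSparseSeries maps consecutive sample indices onto values of the "l" label, so every label value receives exactly pointsPerSparseSeries points:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	numIntervals := 1000                       // stand-in for the benchmark's sample count
	pointsPerSparseSeries := numIntervals / 50 // 20 points per label value, 50 values in total

	counts := map[string]int{}
	for s := 0; s < numIntervals; s++ {
		// The same bucketing expression used by setupRangeQueryTestData.
		counts[strconv.Itoa(s/pointsPerSparseSeries)]++
	}
	fmt.Println(len(counts), counts["0"], counts["49"]) // prints: 50 20 20
}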
@@ -264,6 +310,111 @@ func BenchmarkRangeQuery(b *testing.B) { } } +func BenchmarkNativeHistograms(b *testing.B) { + testStorage := teststorage.New(b) + defer testStorage.Close() + + app := testStorage.Appender(context.TODO()) + if err := generateNativeHistogramSeries(app, 3000); err != nil { + b.Fatal(err) + } + if err := app.Commit(); err != nil { + b.Fatal(err) + } + + start := time.Unix(0, 0) + end := start.Add(2 * time.Hour) + step := time.Second * 30 + + cases := []struct { + name string + query string + }{ + { + name: "sum", + query: "sum(native_histogram_series)", + }, + { + name: "sum rate with short rate interval", + query: "sum(rate(native_histogram_series[2m]))", + }, + { + name: "sum rate with long rate interval", + query: "sum(rate(native_histogram_series[20m]))", + }, + { + name: "histogram_count with short rate interval", + query: "histogram_count(sum(rate(native_histogram_series[2m])))", + }, + { + name: "histogram_count with long rate interval", + query: "histogram_count(sum(rate(native_histogram_series[20m])))", + }, + } + + opts := promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 50000000, + Timeout: 100 * time.Second, + EnableAtModifier: true, + EnableNegativeOffset: true, + } + + b.ResetTimer() + b.ReportAllocs() + + for _, tc := range cases { + b.Run(tc.name, func(b *testing.B) { + ng := promqltest.NewTestEngineWithOpts(b, opts) + for i := 0; i < b.N; i++ { + qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) + if err != nil { + b.Fatal(err) + } + if result := qry.Exec(context.Background()); result.Err != nil { + b.Fatal(result.Err) + } + } + }) + } +} + +func generateNativeHistogramSeries(app storage.Appender, numSeries int) error { + commonLabels := []string{labels.MetricName, "native_histogram_series", "foo", "bar"} + series := make([][]*histogram.Histogram, numSeries) + for i := range series { + series[i] = tsdbutil.GenerateTestHistograms(2000) + } + higherSchemaHist := &histogram.Histogram{ + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -5, Length: 2}, // -5 -4 + {Offset: 2, Length: 3}, // -1 0 1 + {Offset: 2, Length: 2}, // 4 5 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 3}, + Count: 13, + } + for sid, histograms := range series { + seriesLabels := labels.FromStrings(append(commonLabels, "h", strconv.Itoa(sid))...) + for i := range histograms { + ts := time.Unix(int64(i*15), 0).UnixMilli() + if i == 0 { + // Inject a histogram with a higher schema. 
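+ // (Presumably the point of the mismatch: aggregating this finer schema-3
+ // histogram with the lower-schema generated ones forces rate/sum through
+ // the convert-to-a-common-schema path.)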
+ if _, err := app.AppendHistogram(0, seriesLabels, ts, higherSchemaHist, nil); err != nil { + return err + } + } + if _, err := app.AppendHistogram(0, seriesLabels, ts, histograms[i], nil); err != nil { + return err + } + } + } + + return nil +} + func BenchmarkParser(b *testing.B) { cases := []string{ "a", diff --git a/promql/engine.go b/promql/engine.go index cafbab4e33..e10be63783 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -19,23 +19,24 @@ import ( "context" "errors" "fmt" + "io" "math" "reflect" "runtime" + "slices" "sort" "strconv" + "strings" "sync" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -44,6 +45,7 @@ import ( "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/zeropool" ) @@ -59,6 +61,14 @@ const ( maxInt64 = 9223372036854774784 // The smallest SampleValue that can be converted to an int64 without underflow. minInt64 = -9223372036854775808 + + // Max initial size for the pooled points slices. + // The getHPointSlice and getFPointSlice functions are called with an estimated size which often can be + // over-estimated. + maxPointsSliceSize = 5000 + + // The default buffer size for points used by the matrix selector. + matrixSelectorSliceSize = 16 ) type engineMetrics struct { @@ -106,6 +116,12 @@ func (e ErrStorage) Error() string { return e.Err.Error() } +// QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked. +type QueryEngine interface { + NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) + NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) +} + // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { @@ -256,6 +272,8 @@ func contextErr(err error, env string) error { // // 2) Enforcement of the maximum number of concurrent queries. type QueryTracker interface { + io.Closer + // GetMaxConcurrent returns maximum number of concurrent queries that are allowed by this tracker. GetMaxConcurrent() int @@ -298,6 +316,11 @@ type EngineOpts struct { // EnablePerStepStats if true allows for per-step stats to be computed on request. Disabled otherwise. EnablePerStepStats bool + + // EnableDelayedNameRemoval delays the removal of the __name__ label to the last step of the query evaluation. + // This is useful in certain scenarios where the __name__ label must be preserved or where applying a + // regex-matcher to the __name__ label may otherwise lead to duplicate labelset errors. + EnableDelayedNameRemoval bool } // Engine handles the lifetime of queries from beginning to end. @@ -315,6 +338,7 @@ type Engine struct { enableAtModifier bool enableNegativeOffset bool enablePerStepStats bool + enableDelayedNameRemoval bool } // NewEngine returns a new engine. 
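The QueryEngine interface added above exists so the engine can be swapped out. As a minimal sketch of what that enables (loggingEngine is a hypothetical illustration, not part of the patch), a caller could wrap any engine to observe queries while still satisfying the interface:

package enginewrap

import (
	"context"
	"log"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
)

// loggingEngine wraps a promql.QueryEngine and logs every query string
// before delegating to the wrapped engine.
type loggingEngine struct {
	next promql.QueryEngine
}

func (e loggingEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) {
	log.Printf("instant query: %s", qs)
	return e.next.NewInstantQuery(ctx, q, opts, qs, ts)
}

func (e loggingEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) {
	log.Printf("range query: %s", qs)
	return e.next.NewRangeQuery(ctx, q, opts, qs, start, end, interval)
}

// Compile-time check that the wrapper satisfies the interface.
var _ promql.QueryEngine = loggingEngine{}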
@@ -405,7 +429,20 @@ func NewEngine(opts EngineOpts) *Engine { enableAtModifier: opts.EnableAtModifier, enableNegativeOffset: opts.EnableNegativeOffset, enablePerStepStats: opts.EnablePerStepStats, + enableDelayedNameRemoval: opts.EnableDelayedNameRemoval, + } +} + +// Close closes ng. +func (ng *Engine) Close() error { + if ng == nil { + return nil + } + + if ng.activeQueryTracker != nil { + return ng.activeQueryTracker.Close() } + return nil } // SetQueryLogger sets the query logger. @@ -558,7 +595,8 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { return validationErr } -func (ng *Engine) newTestQuery(f func(context.Context) error) Query { +// NewTestQuery injects special behaviour into Query for testing. +func (ng *Engine) NewTestQuery(f func(context.Context) error) Query { qry := &query{ q: "test statement", stmt: parser.TestStmt(f), @@ -573,7 +611,7 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query { // // At this point per query only one EvalStmt is evaluated. Alert and record // statements are not handled by the Engine. -func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { +func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annotations.Annotations, err error) { ng.metrics.currentQueries.Inc() defer func() { ng.metrics.currentQueries.Dec() @@ -666,17 +704,17 @@ func durationMilliseconds(d time.Duration) int64 { } // execEvalStmt evaluates the expression of an evaluation statement for the given time range. -func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) { +func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) { prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) - mint, maxt := ng.findMinMaxTime(s) - querier, err := query.queryable.Querier(ctxPrepare, mint, maxt) + mint, maxt := FindMinMaxTime(s) + querier, err := query.queryable.Querier(mint, maxt) if err != nil { prepareSpanTimer.Finish() return nil, nil, err } defer querier.Close() - ng.populateSeries(querier, s) + ng.populateSeries(ctxPrepare, querier, s) prepareSpanTimer.Finish() // Modify the offset of vector and matrix selectors for the @ modifier @@ -690,16 +728,16 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval startTimestamp: start, endTimestamp: start, interval: 1, - ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, lookbackDelta: s.LookbackDelta, samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ng.enableDelayedNameRemoval, } query.sampleStats.InitStepTracking(start, start, 1) - val, warnings, err := evaluator.Eval(s.Expr) + val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) evalSpanTimer.Finish() @@ -727,15 +765,16 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. 
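+ // DropName travels with each sample so that, when EnableDelayedNameRemoval
+ // is set, the __name__ label can still be stripped in the final step of
+ // evaluation rather than eagerly.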
if len(s.Histograms) > 0 { - vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start} + vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start, DropName: s.DropName} } else { - vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start} + vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start, DropName: s.DropName} } } return vector, warnings, nil case parser.ValueTypeScalar: return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil case parser.ValueTypeMatrix: + ng.sortMatrixResult(ctx, query, mat) return mat, warnings, nil default: panic(fmt.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type())) @@ -747,15 +786,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval startTimestamp: timeMilliseconds(s.Start), endTimestamp: timeMilliseconds(s.End), interval: durationMilliseconds(s.Interval), - ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, lookbackDelta: s.LookbackDelta, samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ng.enableDelayedNameRemoval, } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) - val, warnings, err := evaluator.Eval(s.Expr) + val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) evalSpanTimer.Finish() @@ -774,11 +813,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } // TODO(fabxc): where to ensure metric labels are a copy from the storage internals. + ng.sortMatrixResult(ctx, query, mat) + + return mat, warnings, nil +} + +func (ng *Engine) sortMatrixResult(ctx context.Context, query *query, mat Matrix) { sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort) sort.Sort(mat) sortSpanTimer.Finish() - - return mat, warnings, nil } // subqueryTimes returns the sum of offsets and ranges of all subqueries in the path. @@ -810,7 +853,10 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { return subqOffset, subqRange, tsp } -func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { +// FindMinMaxTime returns the time in milliseconds of the earliest and latest point in time the statement will try to process. +// This takes into account offsets, @ modifiers, and range selectors. +// If the statement does not select series, then FindMinMaxTime returns (0, 0). +func FindMinMaxTime(s *parser.EvalStmt) (int64, int64) { var minTimestamp, maxTimestamp int64 = math.MaxInt64, math.MinInt64 // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. 
// The evaluation of the VectorSelector inside then evaluates the given range and unsets @@ -819,7 +865,7 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: - start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + start, end := getTimeRangesForSelector(s, n, path, evalRange) if start < minTimestamp { minTimestamp = start } @@ -842,7 +888,7 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { return minTimestamp, maxTimestamp } -func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) { +func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) { start, end := timestamp.FromTime(s.Start), timestamp.FromTime(s.End) subqOffset, subqRange, subqTs := subqueryTimes(path) @@ -863,11 +909,17 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS } if evalRange == 0 { - start -= durationMilliseconds(s.LookbackDelta) + // Reduce the start by one fewer ms than the lookback delta + // because we want to exclude samples that are precisely the + // lookback delta before the eval time. + start -= durationMilliseconds(s.LookbackDelta) - 1 } else { - // For all matrix queries we want to ensure that we have (end-start) + range selected - // this way we have `range` data before the start time - start -= durationMilliseconds(evalRange) + // For all matrix queries we want to ensure that we have + // (end-start) + range selected; this way we have `range` data + // before the start time. We subtract one from the range to + // exclude samples positioned directly at the lower boundary of + // the range. + start -= durationMilliseconds(evalRange) - 1 } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) @@ -890,7 +942,7 @@ func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { return interval } -func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { +func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) { // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. // The evaluation of the VectorSelector inside then evaluates the given range and unsets // the variable. @@ -899,7 +951,7 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: - start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + start, end := getTimeRangesForSelector(s, n, path, evalRange) interval := ng.getLastSubqueryInterval(path) if interval == 0 { interval = s.Interval @@ -913,7 +965,7 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { } evalRange = 0 hints.By, hints.Grouping = extractGroupsFromPath(path) - n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...) + n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...)
case *parser.MatrixSelector: evalRange = n.Range @@ -952,7 +1004,9 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { return false, nil } -func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.Warnings, error) { +// checkAndExpandSeriesSet expands expr's UnexpandedSeriesSet into expr's Series. +// If the Series field is already non-nil, it's a no-op. +func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) { switch e := expr.(type) { case *parser.MatrixSelector: return checkAndExpandSeriesSet(ctx, e.VectorSelector) @@ -960,14 +1014,22 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.War if e.Series != nil { return nil, nil } + span := trace.SpanFromContext(ctx) + span.AddEvent("expand start", trace.WithAttributes(attribute.String("selector", e.String()))) series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet) + if e.SkipHistogramBuckets { + for i := range series { + series[i] = newHistogramStatsSeries(series[i]) + } + } e.Series = series + span.AddEvent("expand end", trace.WithAttributes(attribute.Int("num_series", len(series)))) return ws, err } return nil, nil } -func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws storage.Warnings, err error) { +func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws annotations.Annotations, err error) { for it.Next() { select { case <-ctx.Done(): @@ -981,7 +1043,7 @@ func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.S type errWithWarnings struct { err error - warnings storage.Warnings + warnings annotations.Annotations } func (e errWithWarnings) Error() string { return e.err.Error() } @@ -991,8 +1053,6 @@ func (e errWithWarnings) Error() string { return e.err.Error() } // querier and reports errors. On timeout or cancellation of its context it // terminates. type evaluator struct { - ctx context.Context - startTimestamp int64 // Start time in milliseconds. endTimestamp int64 // End time in milliseconds. interval int64 // Interval in milliseconds. @@ -1003,6 +1063,7 @@ type evaluator struct { lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 + enableDelayedNameRemoval bool } // errorf causes a panic with the input formatted into an error. @@ -1016,7 +1077,7 @@ func (ev *evaluator) error(err error) { } // recover is the handler that turns panics into returns from the top level of evaluation. -func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) { +func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp *error) { e := recover() if e == nil { return @@ -1028,11 +1089,11 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err - *ws = append(*ws, err.warnings...) 
+ ws.Merge(err.warnings) case error: *errp = err default: @@ -1040,17 +1101,18 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) { +func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) - v, ws = ev.eval(expr) + v, ws = ev.eval(ctx, expr) + if ev.enableDelayedNameRemoval { + ev.cleanupMetricLabels(v) + } return v, ws, nil } // EvalSeriesHelper stores extra information about a series. type EvalSeriesHelper struct { - // The grouping key used by aggregation. - groupingKey uint64 // Used to map left-hand to right-hand in binary operations. signature string } @@ -1063,12 +1125,8 @@ type EvalNodeHelper struct { Out Vector // Caches. - // DropMetricName and label_*. - Dmn map[uint64]labels.Labels - // funcHistogramQuantile for conventional histograms. + // funcHistogramQuantile for classic histograms. signatureToMetricWithBuckets map[string]*metricWithBuckets - // label_replace. - regex *regexp.Regexp lb *labels.Builder lblBuf []byte @@ -1078,6 +1136,9 @@ type EvalNodeHelper struct { rightSigs map[string]Sample matchedSigs map[string]map[uint64]struct{} resultMetric map[string]labels.Labels + + // Additional options for the evaluation. + enableDelayedNameRemoval bool } func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { @@ -1088,40 +1149,25 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { } } -// DropMetricName is a cached version of DropMetricName. -func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { - if enh.Dmn == nil { - enh.Dmn = make(map[uint64]labels.Labels, len(enh.Out)) - } - h := l.Hash() - ret, ok := enh.Dmn[h] - if ok { - return ret - } - ret = dropMetricName(l) - enh.Dmn[h] = ret - return ret -} - // rangeEval evaluates the given expressions, and then for each step calls // the given funcCall with the values computed for each expression at that // step. The return value is the combination into time series of all the // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) originalNumSamples := ev.currentSamples - var warnings storage.Warnings + var warnings annotations.Annotations for i, e := range exprs { // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. - val, ws := ev.eval(e) - warnings = append(warnings, ws...) 
+ val, ws := ev.eval(ctx, e) + warnings.Merge(ws) matrixes[i] = val.(Matrix) // Keep a copy of the original point slices so that they @@ -1142,8 +1188,12 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) biggestLen = len(matrixes[i]) } } - enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)} - seriess := make(map[uint64]Series, biggestLen) // Output series by series hash. + enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} + type seriesAndTimestamp struct { + Series + ts int64 + } + seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash. tempNumSamples := ev.currentSamples var ( @@ -1162,15 +1212,13 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) bufHelpers[i] = make([]EvalSeriesHelper, len(matrixes[i])) for si, series := range matrixes[i] { - h := seriesHelpers[i][si] - prepSeries(series.Metric, &h) - seriesHelpers[i][si] = h + prepSeries(series.Metric, &seriesHelpers[i][si]) } } } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. @@ -1184,41 +1232,27 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for si, series := range matrixes[i] { - for _, point := range series.Floats { - if point.T == ts { - if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: point.F, T: ts}) - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Floats = series.Floats[1:] - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - break + switch { + case len(series.Floats) > 0 && series.Floats[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts, DropName: series.DropName}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + matrixes[i][si].Floats = series.Floats[1:] + case len(series.Histograms) > 0 && series.Histograms[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts, DropName: series.DropName}) + matrixes[i][si].Histograms = series.Histograms[1:] + default: + continue } - for _, point := range series.Histograms { - if point.T == ts { - if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: point.H, T: ts}) - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Histograms = series.Histograms[1:] - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - break + if prepSeries != nil { + bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) + } + // Don't add histogram size here because we only + // copy the pointer above, not the whole + // histogram. 
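+ // (Elsewhere histogram samples are weighted by HPoint.size(); because this
+ // vector only borrows the pointer, it counts as a single sample here.)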
+ ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } } args[i] = vectors[i] @@ -1228,31 +1262,31 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Make the function call. enh.Ts = ts result, ws := funcCall(args, bufHelpers, enh) - if result.ContainsSameLabelset() { - ev.errorf("vector cannot contain metrics with the same labelset") - } enh.Out = result[:0] // Reuse result vector. - warnings = append(warnings, ws...) + warnings.Merge(ws) - ev.currentSamples += len(result) + vecNumSamples := result.TotalSamples() + ev.currentSamples += vecNumSamples // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also // needs to include the samples from the result here, as they're still in memory. - tempNumSamples += len(result) + tempNumSamples += vecNumSamples ev.samplesStats.UpdatePeak(ev.currentSamples) if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) } - ev.samplesStats.UpdatePeak(ev.currentSamples) // If this could be an instant query, shortcut so as not to change sort order. if ev.endTimestamp == ev.startTimestamp { + if !ev.enableDelayedNameRemoval && result.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } mat := make(Matrix, len(result)) for i, s := range result { if s.H == nil { - mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}, DropName: s.DropName} } else { - mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}, DropName: s.DropName} } } ev.currentSamples = originalNumSamples + mat.TotalSamples() @@ -1264,20 +1298,15 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) for _, sample := range result { h := sample.Metric.Hash() ss, ok := seriess[h] - if !ok { - ss = Series{Metric: sample.Metric} - } - if sample.H == nil { - if ss.Floats == nil { - ss.Floats = getFPointSlice(numSteps) + if ok { + if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate. + ev.errorf("vector cannot contain metrics with the same labelset") } - ss.Floats = append(ss.Floats, FPoint{T: ts, F: sample.F}) + ss.ts = ts } else { - if ss.Histograms == nil { - ss.Histograms = getHPointSlice(numSteps) - } - ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: sample.H}) + ss = seriesAndTimestamp{Series{Metric: sample.Metric, DropName: sample.DropName}, ts} } + addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) seriess[h] = ss } } @@ -1292,20 +1321,213 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Assemble the output matrix. By the time we get here we know we don't have too many samples. mat := make(Matrix, 0, len(seriess)) for _, ss := range seriess { - mat = append(mat, ss) + mat = append(mat, ss.Series) } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) return mat, warnings } +func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { + // Keep a copy of the original point slice so that it can be returned to the pool. 
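+ // (The clone preserves the original slice headers: the per-series Floats
+ // and Histograms get re-sliced as the aggregation consumes them, and the
+ // deferred puts below must return the full backing arrays to the pools.)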
+ origMatrix := slices.Clone(inputMatrix) + defer func() { + for _, s := range origMatrix { + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) + } + }() + + var warnings annotations.Annotations + + enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval} + tempNumSamples := ev.currentSamples + + // Create a mapping from input series to output groups. + buf := make([]byte, 0, 1024) + groupToResultIndex := make(map[uint64]int) + seriesToResult := make([]int, len(inputMatrix)) + var result Matrix + + groupCount := 0 + for si, series := range inputMatrix { + var groupingKey uint64 + groupingKey, buf = generateGroupingKey(series.Metric, sortedGrouping, aggExpr.Without, buf) + index, ok := groupToResultIndex[groupingKey] + // Add a new group if it doesn't exist. + if !ok { + if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK && aggExpr.Op != parser.LIMITK && aggExpr.Op != parser.LIMIT_RATIO { + m := generateGroupingLabels(enh, series.Metric, aggExpr.Without, sortedGrouping) + result = append(result, Series{Metric: m}) + } + index = groupCount + groupToResultIndex[groupingKey] = index + groupCount++ + } + seriesToResult[si] = index + } + groups := make([]groupedAggregation, groupCount) + + var k int + var ratio float64 + var seriess map[uint64]Series + switch aggExpr.Op { + case parser.TOPK, parser.BOTTOMK, parser.LIMITK: + if !convertibleToInt64(param) { + ev.errorf("Scalar value %v overflows int64", param) + } + k = int(param) + if k > len(inputMatrix) { + k = len(inputMatrix) + } + if k < 1 { + return nil, warnings + } + seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. + case parser.LIMIT_RATIO: + if math.IsNaN(param) { + ev.errorf("Ratio value %v is NaN", param) + } + switch { + case param == 0: + return nil, warnings + case param < -1.0: + ratio = -1.0 + warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) + case param > 1.0: + ratio = 1.0 + warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) + default: + ratio = param + } + seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. + case parser.QUANTILE: + if math.IsNaN(param) || param < 0 || param > 1 { + warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange())) + } + } + + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + // Reset number of samples in memory after each timestamp. + ev.currentSamples = tempNumSamples + + // Make the function call. + enh.Ts = ts + var ws annotations.Annotations + switch aggExpr.Op { + case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: + result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess) + // If this could be an instant query, shortcut so as not to change sort order. + if ev.endTimestamp == ev.startTimestamp { + warnings.Merge(ws) + return result, warnings + } + default: + ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh) + } + + warnings.Merge(ws) + + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + + // Assemble the output matrix. By the time we get here we know we don't have too many samples. 
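+ // (topk/bottomk/limitk/limit_ratio produce one output series per selected
+ // input series, collected in seriess by hash; the other aggregations emit
+ // one row per group, so empty rows are compacted away below.)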
+ switch aggExpr.Op { + case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: + result = make(Matrix, 0, len(seriess)) + for _, ss := range seriess { + result = append(result, ss) + } + default: + // Remove empty result rows. + dst := 0 + for _, series := range result { + if len(series.Floats) > 0 || len(series.Histograms) > 0 { + result[dst] = series + dst++ + } + } + result = result[:dst] + } + return result, warnings +} + +// evalVectorSelector generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from vs. +// vs.Series has to be expanded before calling this method. +// For every series iterator in vs.Series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp, +// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series. +// All of the generated Series are collected into a Matrix, which is returned. +func (ev *evaluator) evalVectorSelector(ctx context.Context, vs *parser.VectorSelector) Matrix { + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + + mat := make(Matrix, 0, len(vs.Series)) + var prevSS *Series + it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator + for _, s := range vs.Series { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) + ss := Series{ + Metric: s.Labels(), + } + + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { + step++ + _, f, h, ok := ev.vectorSelectorSingle(it, vs, ts) + if !ok { + continue + } + + if h == nil { + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtStep(step, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Floats == nil { + ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + point := HPoint{H: h, T: ts} + histSize := point.size() + ev.currentSamples += histSize + ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize)) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Histograms == nil { + ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) + } + ss.Histograms = append(ss.Histograms, point) + } + } + + if len(ss.Floats)+len(ss.Histograms) > 0 { + mat = append(mat, ss) + prevSS = &mat[len(mat)-1] + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + return mat +} + // evalSubquery evaluates the given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. -func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) { +func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery; those samples will be counted in a later stage.
ev.samplesStats = ev.samplesStats.NewChild() - val, ws := ev.eval(subq) + val, ws := ev.eval(ctx, subq) // But do incorporate the peak from the subquery samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats @@ -1325,27 +1547,27 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele Range: subq.Range, VectorSelector: vs, } - totalSamples := 0 for _, s := range mat { - totalSamples += len(s.Floats) + len(s.Histograms) vs.Series = append(vs.Series, NewStorageSeries(s)) } - return ms, totalSamples, ws + return ms, mat.TotalSamples(), ws } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { +func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. // Thus, we check for timeout/cancellation here. - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 // Create a new span to help investigate inner evaluation performances. - ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) - ev.ctx = ctxWithSpan + ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) defer span.End() + if ss, ok := expr.(interface{ ShortString() string }); ok { + span.SetAttributes(attribute.String("operation", ss.ShortString())) + } switch e := expr.(type) { case *parser.AggregateExpr: @@ -1353,28 +1575,44 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { sortedGrouping := e.Grouping slices.Sort(sortedGrouping) - // Prepare a function to initialise series helpers with the grouping key. - buf := make([]byte, 0, 1024) - initSeries := func(series labels.Labels, h *EvalSeriesHelper) { - h.groupingKey, buf = generateGroupingKey(series, sortedGrouping, e.Without, buf) - } - unwrapParenExpr(&e.Param) param := unwrapStepInvariantExpr(e.Param) unwrapParenExpr(¶m) - if s, ok := param.(*parser.StringLiteral); ok { - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil + + if e.Op == parser.COUNT_VALUES { + valueLabel := param.(*parser.StringLiteral) + if !model.LabelName(valueLabel.Val).IsValid() { + ev.errorf("invalid label name %q", valueLabel) + } + if !e.Without { + sortedGrouping = append(sortedGrouping, valueLabel.Val) + slices.Sort(sortedGrouping) + } + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) }, e.Expr) } - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - var param float64 - if e.Param != nil { - param = v[0].(Vector)[0].F - } - return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil - }, e.Param, e.Expr) + var warnings annotations.Annotations + originalNumSamples := ev.currentSamples + // param is the number k for topk/bottomk, or q for quantile. 
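+ // (limit_ratio receives its ratio through the same parameter.)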
+ var fParam float64 + if param != nil { + val, ws := ev.eval(ctx, param) + warnings.Merge(ws) + fParam = val.(Matrix)[0].Floats[0].F + } + // Now fetch the data to be aggregated. + val, ws := ev.eval(ctx, e.Expr) + warnings.Merge(ws) + inputMatrix := val.(Matrix) + + result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam) + warnings.Merge(ws) + ev.currentSamples = originalNumSamples + result.TotalSamples() + ev.samplesStats.UpdatePeak(ev.currentSamples) + + return result, warnings case *parser.Call: call := FunctionCalls[e.Func.Name] @@ -1387,7 +1625,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) + return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e) } } @@ -1395,7 +1633,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { var ( matrixArgIndex int matrixArg bool - warnings storage.Warnings + warnings annotations.Annotations ) for i := range e.Args { unwrapParenExpr(&e.Args[i]) @@ -1411,9 +1649,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { matrixArgIndex = i matrixArg = true // Replacing parser.SubqueryExpr with parser.MatrixSelector. - val, totalSamples, ws := ev.evalSubquery(subq) + val, totalSamples, ws := ev.evalSubquery(ctx, subq) e.Args[i] = val - warnings = append(warnings, ws...) + warnings.Merge(ws) defer func() { // subquery result takes space in the memory. Get rid of that at the end. val.VectorSelector.(*parser.VectorSelector).Series = nil @@ -1422,10 +1660,20 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { break } } + + // Special handling for functions that work on series not samples. + switch e.Func.Name { + case "label_replace": + return ev.evalLabelReplace(ctx, e.Args) + case "label_join": + return ev.evalLabelJoin(ctx, e.Args) + } + if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return call(v, e.Args, enh), warnings + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, annos := call(v, e.Args, enh) + return vec, warnings.Merge(annos) }, e.Args...) } @@ -1435,11 +1683,11 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { otherInArgs := make([]Vector, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] - warnings = append(warnings, ws...) + warnings.Merge(ws) } } @@ -1449,8 +1697,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { sel := arg.(*parser.MatrixSelector) selVS := sel.VectorSelector.(*parser.VectorSelector) - ws, err := checkAndExpandSeriesSet(ev.ctx, sel) - warnings = append(warnings, ws...) + ws, err := checkAndExpandSeriesSet(ctx, sel) + warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) } @@ -1464,14 +1712,25 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // Reuse objects across steps to save memory allocations. 
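+ // prevSS lets consecutive output series share one pooled backing array;
+ // see reuseOrGetFPointSlices further down.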
var floats []FPoint var histograms []HPoint + var prevSS *Series inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix - enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} + enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) var chkIter chunkenc.Iterator + + // The last_over_time function acts like offset; thus, it + // should keep the metric name. For all the other range + // vector functions, the only change needed is to drop the + // metric name in the output. + dropName := e.Func.Name != "last_over_time" + for i, s := range selVS.Series { - ev.currentSamples -= len(floats) + len(histograms) + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + ev.currentSamples -= len(floats) + totalHPointSize(histograms) if floats != nil { floats = floats[:0] } @@ -1481,15 +1740,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { chkIter = s.Iterator(chkIter) it.Reset(chkIter) metric := selVS.Series[i].Labels() - // The last_over_time function acts like offset; thus, it - // should keep the metric name. For all the other range - // vector functions, the only change needed is to drop the - // metric name in the output. - if e.Func.Name != "last_over_time" { - metric = dropMetricName(metric) + if !ev.enableDelayedNameRemoval && dropName { + metric = metric.DropMetricName() } ss := Series{ - Metric: metric, + Metric: metric, + DropName: dropName, } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1502,10 +1758,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F } } - maxt := ts - offset - mint := maxt - selRange - // Evaluate the matrix selector for this series for this step. - floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms) + // Evaluate the matrix selector for this series + // for this step, but only if this is the 1st + // iteration or no @ modifier has been used. + if ts == ev.startTimestamp || selVS.Timestamp == nil { + maxt := ts - offset + mint := maxt - selRange + floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms) + } if len(floats)+len(histograms) == 0 { continue } @@ -1513,18 +1773,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { inMatrix[0].Histograms = histograms enh.Ts = ts // Make the function call. - outVec := call(inArgs, e.Args, enh) - ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) + outVec, annos := call(inArgs, e.Args, enh) + warnings.Merge(annos) + ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+totalHPointSize(histograms))) + enh.Out = outVec[:0] if len(outVec) > 0 { if outVec[0].H == nil { if ss.Floats == nil { - ss.Floats = getFPointSlice(numSteps) + ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) } + ss.Floats = append(ss.Floats, FPoint{F: outVec[0].F, T: ts}) } else { if ss.Histograms == nil { - ss.Histograms = getHPointSlice(numSteps) + ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) } ss.Histograms = append(ss.Histograms, HPoint{H: outVec[0].H, T: ts}) } @@ -1532,21 +1795,34 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // Only buffer stepRange milliseconds from the second step on. 
it.ReduceDelta(stepRange) } - if len(ss.Floats)+len(ss.Histograms) > 0 { - if ev.currentSamples+len(ss.Floats)+len(ss.Histograms) <= ev.maxSamples { - mat = append(mat, ss) - ev.currentSamples += len(ss.Floats) + len(ss.Histograms) - } else { + histSamples := totalHPointSize(ss.Histograms) + + if len(ss.Floats)+histSamples > 0 { + if ev.currentSamples+len(ss.Floats)+histSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) } + mat = append(mat, ss) + prevSS = &mat[len(mat)-1] + ev.currentSamples += len(ss.Floats) + histSamples } ev.samplesStats.UpdatePeak(ev.currentSamples) + + if e.Func.Name == "rate" || e.Func.Name == "increase" { + metricName := inMatrix[0].Metric.Get(labels.MetricName) + if metricName != "" && len(ss.Floats) > 0 && + !strings.HasSuffix(metricName, "_total") && + !strings.HasSuffix(metricName, "_sum") && + !strings.HasSuffix(metricName, "_count") && + !strings.HasSuffix(metricName, "_bucket") { + warnings.Add(annotations.NewPossibleNonCounterInfo(metricName, e.Args[0].PositionRange())) + } + } } ev.samplesStats.UpdatePeak(ev.currentSamples) - ev.currentSamples -= len(floats) + len(histograms) + ev.currentSamples -= len(floats) + totalHPointSize(histograms) putFPointSlice(floats) - putHPointSlice(histograms) + putMatrixSelectorHPointSlice(histograms) // The absent_over_time function returns 0 or 1 series. So far, the matrix // contains multiple series. The following code will create a new series @@ -1583,32 +1859,38 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { return Matrix{ Series{ - Metric: createLabelsForAbsentFunction(e.Args[0]), - Floats: newp, + Metric: createLabelsForAbsentFunction(e.Args[0]), + Floats: newp, + DropName: dropName, }, }, warnings } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } - return mat, warnings case *parser.ParenExpr: - return ev.eval(e.Expr) + return ev.eval(ctx, e.Expr) case *parser.UnaryExpr: - val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) mat := val.(Matrix) if e.Op == parser.SUB { for i := range mat { - mat[i].Metric = dropMetricName(mat[i].Metric) + if !ev.enableDelayedNameRemoval { + mat[i].Metric = mat[i].Metric.DropMetricName() + } + mat[i].DropName = true for j := range mat[i].Floats { mat[i].Floats[j].F = -mat[i].Floats[j].F } + for j := range mat[i].Histograms { + mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1) + } } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } } @@ -1617,7 +1899,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) @@ -1630,106 +1912,73 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) 
(Vector, storage.Warnings) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + span.SetAttributes(attribute.Float64("value", e.Val)) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: + span.SetAttributes(attribute.String("value", e.Val)) return String{V: e.Val, T: ev.startTimestamp}, nil case *parser.VectorSelector: - ws, err := 
checkAndExpandSeriesSet(ev.ctx, e) + ws, err := checkAndExpandSeriesSet(ctx, e) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - mat := make(Matrix, 0, len(e.Series)) - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range e.Series { - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - ss := Series{ - Metric: e.Series[i].Labels(), - } - - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { - step++ - _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) - if ok { - if ev.currentSamples < ev.maxSamples { - if h == nil { - if ss.Floats == nil { - ss.Floats = getFPointSlice(numSteps) - } - ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) - } else { - if ss.Histograms == nil { - ss.Histograms = getHPointSlice(numSteps) - } - ss.Histograms = append(ss.Histograms, HPoint{H: h, T: ts}) - } - ev.samplesStats.IncrementSamplesAtStep(step, 1) - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - } - - if len(ss.Floats)+len(ss.Histograms) > 0 { - mat = append(mat, ss) - } - } - ev.samplesStats.UpdatePeak(ev.currentSamples) + mat := ev.evalVectorSelector(ctx, e) return mat, ws case *parser.MatrixSelector: if ev.startTimestamp != ev.endTimestamp { panic(errors.New("cannot do range evaluation of matrix selector")) } - return ev.matrixSelector(e) + return ev.matrixSelector(ctx, e) case *parser.SubqueryExpr: offsetMillis := durationMilliseconds(e.Offset) rangeMillis := durationMilliseconds(e.Range) newEv := &evaluator{ endTimestamp: ev.endTimestamp - offsetMillis, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, } if e.Step != 0 { @@ -1752,7 +2001,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { setOffsetForAtModifier(newEv.startTimestamp, e.Expr) } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples) @@ -1760,22 +2009,22 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.StepInvariantExpr: switch ce := e.Expr.(type) { case *parser.StringLiteral, *parser.NumberLiteral: - return ev.eval(ce) + return ev.eval(ctx, ce) } newEv := &evaluator{ startTimestamp: ev.startTimestamp, endTimestamp: ev.startTimestamp, // Always a single evaluation. 
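+ // A step-invariant expression has the same value at every step, so it is
+ // evaluated once and the single result is copied across the steps of the
+ // outer evaluation below.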
interval: ev.interval, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1806,13 +2055,15 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { T: ts, F: mat[i].Floats[0].F, }) + ev.currentSamples++ } else { - mat[i].Histograms = append(mat[i].Histograms, HPoint{ + point := HPoint{ T: ts, H: mat[i].Histograms[0].H, - }) + } + mat[i].Histograms = append(mat[i].Histograms, point) + ev.currentSamples += point.size() } - ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) } @@ -1825,8 +2076,32 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unhandled expression of type: %T", expr)) } -func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { - ws, err := checkAndExpandSeriesSet(ev.ctx, vs) +// reuseOrGetHPointSlices reuses the space from the previous slice to create a new slice if the former has lots of room. +// The previous slice's capacity is adjusted so that, when it is reused from the pool, it doesn't overflow into the new one. +func reuseOrGetHPointSlices(prevSS *Series, numSteps int) (r []HPoint) { + if prevSS != nil && cap(prevSS.Histograms)-2*len(prevSS.Histograms) > 0 { + r = prevSS.Histograms[len(prevSS.Histograms):] + prevSS.Histograms = prevSS.Histograms[0:len(prevSS.Histograms):len(prevSS.Histograms)] + return + } + + return getHPointSlice(numSteps) +} + +// reuseOrGetFPointSlices reuses the space from the previous slice to create a new slice if the former has lots of room. +// The previous slice's capacity is adjusted so that, when it is reused from the pool, it doesn't overflow into the new one.
+func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) { + if prevSS != nil && cap(prevSS.Floats)-2*len(prevSS.Floats) > 0 { + r = prevSS.Floats[len(prevSS.Floats):] + prevSS.Floats = prevSS.Floats[0:len(prevSS.Floats):len(prevSS.Floats)] + return + } + + return getFPointSlice(numSteps) +} + +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Context, vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { + ws, err := checkAndExpandSeriesSet(ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -1834,10 +2109,10 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) for i, s := range vs.Series { it := s.Iterator(nil) - seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -1848,24 +2123,26 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec vec := make(Vector, 0, len(vs.Series)) for i, s := range vs.Series { it := seriesIterators[i] - t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) - if ok { - vec = append(vec, Sample{ - Metric: s.Labels(), - T: t, - F: f, - H: h, - }) + t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + if !ok { + continue + } - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + // Note that we ignore the sample values because call only cares about the timestamp. + vec = append(vec, Sample{ + Metric: s.Labels(), + T: t, + }) + + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } } ev.samplesStats.UpdatePeak(ev.currentSamples) - return call([]parser.Value{vec}, e.Args, enh), ws + vec, annos := call([]parser.Value{vec}, e.Args, enh) + return vec, ws.Merge(annos) }) } @@ -1894,7 +2171,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no if valueType == chunkenc.ValNone || t > refTime { var ok bool t, v, h, ok = it.PeekPrev() - if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { + if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } } @@ -1907,25 +2184,46 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no var ( fPointPool zeropool.Pool[[]FPoint] hPointPool zeropool.Pool[[]HPoint] + + // matrixSelectorHPool holds reusable histogram slices used by the matrix + // selector. The key difference between this pool and the hPointPool is that + // slices returned by this pool should never hold multiple copies of the same + // histogram pointer since histogram objects are reused across query evaluation + // steps. 
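Editor's note: the reuseOrGetFPointSlices/reuseOrGetHPointSlices helpers above lean on Go's full (three-index) slice expression. Clamping the previous slice's capacity to its length guarantees that a later append to it reallocates instead of growing into the region handed out as the new slice. A standalone demonstration with ints:

package main

import "fmt"

func main() {
	prev := make([]int, 2, 8) // previous series' points, with plenty of spare room
	prev[0], prev[1] = 1, 2

	next := prev[len(prev):]           // new slice starts in the spare capacity
	prev = prev[0:len(prev):len(prev)] // cap(prev) is now len(prev), so...

	next = append(next, 3, 4) // ...this writes into the shared backing array,
	prev = append(prev, 99)   // ...while this must reallocate and cannot clobber next

	fmt.Println(prev, next) // [1 2 99] [3 4]
}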
+ matrixSelectorHPool zeropool.Pool[[]HPoint] ) func getFPointSlice(sz int) []FPoint { if p := fPointPool.Get(); p != nil { return p } + + if sz > maxPointsSliceSize { + sz = maxPointsSliceSize + } + return make([]FPoint, 0, sz) } +// putFPointSlice returns the given FPoint slice to the pool so a later getFPointSlice call can reuse it. func putFPointSlice(p []FPoint) { if p != nil { fPointPool.Put(p[:0]) } } +// getHPointSlice returns an HPoint slice with capacity capped at maxPointsSliceSize. +// This function is called with an estimated size which often can be over-estimated. func getHPointSlice(sz int) []HPoint { if p := hPointPool.Get(); p != nil { return p } + + if sz > maxPointsSliceSize { + sz = maxPointsSliceSize + } + return make([]HPoint, 0, sz) } @@ -1935,8 +2233,22 @@ func putHPointSlice(p []HPoint) { } } +func getMatrixSelectorHPoints() []HPoint { + if p := matrixSelectorHPool.Get(); p != nil { + return p + } + + return make([]HPoint, 0, matrixSelectorSliceSize) +} + +func putMatrixSelectorHPointSlice(p []HPoint) { + if p != nil { + matrixSelectorHPool.Put(p[:0]) + } +} + // matrixSelector evaluates a *parser.MatrixSelector expression. -func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storage.Warnings) { +func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSelector) (Matrix, annotations.Annotations) { var ( vs = node.VectorSelector.(*parser.VectorSelector) @@ -1947,7 +2259,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag it = storage.NewBuffer(durationMilliseconds(node.Range)) ) - ws, err := checkAndExpandSeriesSet(ev.ctx, node) + ws, err := checkAndExpandSeriesSet(ctx, node) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -1955,7 +2267,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } chkIter = s.Iterator(chkIter) @@ -1965,10 +2277,10 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag } ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil) - totalLen := int64(len(ss.Floats)) + int64(len(ss.Histograms)) - ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalLen) + totalSize := int64(len(ss.Floats)) + int64(totalHPointSize(ss.Histograms)) + ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalSize) - if totalLen > 0 { + if totalSize > 0 { matrix = append(matrix, ss) } else { putFPointSlice(ss.Floats) @@ -1993,20 +2305,20 @@ func (ev *evaluator) matrixIterSlice( mintFloats, mintHistograms := mint, mint // First floats... - if len(floats) > 0 && floats[len(floats)-1].T >= mint { + if len(floats) > 0 && floats[len(floats)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search.
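Editor's note: the new matrixSelectorHPool above is a util/zeropool.Pool[[]HPoint]. A minimal typed pool with the same Get/Put surface might look like the sketch below; this is an assumption about its shape, and the upstream implementation additionally avoids the interface-boxing allocation that storing slices in a bare sync.Pool incurs.

package main

import "sync"

// Pool is a minimal generic wrapper over sync.Pool. Get returns the zero
// value (a nil slice here) when the pool is empty, which is why callers
// like getHPointSlice check for nil before allocating.
type Pool[T any] struct{ inner sync.Pool }

func (p *Pool[T]) Get() T {
	v, ok := p.inner.Get().(T)
	if !ok {
		var zero T
		return zero
	}
	return v
}

func (p *Pool[T]) Put(v T) { p.inner.Put(v) }

func main() {
	var pool Pool[[]float64]
	s := pool.Get()
	if s == nil {
		s = make([]float64, 0, 16)
	}
	s = append(s, 1, 2, 3)
	pool.Put(s[:0]) // reset length before returning, as putHPointSlice does
}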
var drop int - for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive + for drop = 0; floats[drop].T <= mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mintFloats = floats[len(floats)-1].T + 1 + mintFloats = floats[len(floats)-1].T } else { ev.currentSamples -= len(floats) if floats != nil { @@ -2015,22 +2327,27 @@ func (ev *evaluator) matrixIterSlice( } // ...then the same for histograms. TODO(beorn7): Use generics? - if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + if len(histograms) > 0 && histograms[len(histograms)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive + for drop = 0; histograms[drop].T <= mint; drop++ { } - ev.currentSamples -= drop + // Rotate the buffer around the drop index so that points before mint can be + // reused to store new histograms. + tail := make([]HPoint, drop) + copy(tail, histograms[:drop]) copy(histograms, histograms[drop:]) + copy(histograms[len(histograms)-drop:], tail) histograms = histograms[:len(histograms)-drop] + ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. - mintHistograms = histograms[len(histograms)-1].T + 1 + mintHistograms = histograms[len(histograms)-1].T } else { - ev.currentSamples -= len(histograms) + ev.currentSamples -= totalHPointSize(histograms) if histograms != nil { histograms = histograms[:0] } @@ -2050,20 +2367,27 @@ loop: case chunkenc.ValNone: break loop case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: - t, h := buf.AtFloatHistogram() - if value.IsStaleNaN(h.Sum) { - continue loop - } + t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintHistograms { - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - ev.currentSamples++ + if t > mintHistograms { if histograms == nil { - histograms = getHPointSlice(16) + histograms = getMatrixSelectorHPoints() + } + n := len(histograms) + if n < cap(histograms) { + histograms = histograms[:n+1] + } else { + histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) + } + histograms[n].T, histograms[n].H = buf.AtFloatHistogram(histograms[n].H) + if value.IsStaleNaN(histograms[n].H.Sum) { + histograms = histograms[:n] + continue loop + } + ev.currentSamples += histograms[n].size() + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } - histograms = append(histograms, HPoint{T: t, H: h}) } case chunkenc.ValFloat: t, f := buf.At() @@ -2071,11 +2395,11 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintFloats { - if ev.currentSamples >= ev.maxSamples { + if t > mintFloats { + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) } - ev.currentSamples++ if floats == nil { floats = getFPointSlice(16) } @@ -2086,28 +2410,44 @@ loop: // The sought sample might also be in the range. 
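Editor's note: the rotation above is the interesting part of the histogram trim. Rather than discarding the expired head entries, they are moved behind the surviving points so the histogram objects they hold can be overwritten in place when the buffer refills. A reduced demonstration with ints:

package main

import "fmt"

// rotateDrop trims the first drop entries but parks them past the new
// length instead of losing them, so their storage stays reachable via the
// slice's capacity.
func rotateDrop(points []int, drop int) []int {
	tail := make([]int, drop)
	copy(tail, points[:drop])             // save the expired head entries
	copy(points, points[drop:])           // shift the survivors forward
	copy(points[len(points)-drop:], tail) // park the expired entries at the end
	return points[:len(points)-drop]      // shrink; capacity still covers them
}

func main() {
	pts := []int{10, 20, 30, 40, 50}
	kept := rotateDrop(pts, 2)
	fmt.Println(kept) // [30 40 50]
	fmt.Println(pts)  // [30 40 50 10 20]: dropped entries wait past len for reuse
}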
switch soughtValueType { case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: - t, h := it.AtFloatHistogram() - if t == maxt && !value.IsStaleNaN(h.Sum) { - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if histograms == nil { - histograms = getHPointSlice(16) - } - histograms = append(histograms, HPoint{T: t, H: h}) - ev.currentSamples++ + if it.AtT() != maxt { + break + } + if histograms == nil { + histograms = getMatrixSelectorHPoints() + } + n := len(histograms) + if n < cap(histograms) { + histograms = histograms[:n+1] + } else { + histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } + if histograms[n].H == nil { + // Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy. + // Not an issue in the loop above since that uses an intermediate buffer. + histograms[n].H = &histogram.FloatHistogram{} + } + histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H) + if value.IsStaleNaN(histograms[n].H.Sum) { + histograms = histograms[:n] + break + } + ev.currentSamples += histograms[n].size() + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + case chunkenc.ValFloat: t, f := it.At() if t == maxt && !value.IsStaleNaN(f) { - if ev.currentSamples >= ev.maxSamples { + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) } if floats == nil { floats = getFPointSlice(16) } floats = append(floats, FPoint{T: t, F: f}) - ev.currentSamples++ } } ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -2190,12 +2530,12 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi } // VectorBinop evaluates a binary operation between two Vectors, excluding set operators. -func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) { if matching.Card == parser.CardManyToMany { panic("many-to-many only allowed for set operators") } if len(lhs) == 0 || len(rhs) == 0 { - return nil // Short-circuit: nothing is going to match. + return nil, nil // Short-circuit: nothing is going to match. } // The control flow below handles one-to-one or many-to-one matching. @@ -2248,6 +2588,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // For all lhs samples find a respective rhs sample and perform // the binary operation. 
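Editor's note: the signature change above, VectorBinop now returning (Vector, error), supports the pattern its body uses just below. An element-level failure (for example, incompatible histogram schemas) must not abort the whole binary operation, so the loop records the most recent error and keeps matching, and handleVectorBinopError later converts it into a warning annotation. A reduced sketch of the "lastErr" pattern:

package main

import (
	"errors"
	"fmt"
)

// combineAll applies op pairwise, skipping pairs that fail but remembering
// the most recent failure for the caller to surface as a warning.
func combineAll(lhs, rhs []int, op func(a, b int) (int, error)) ([]int, error) {
	var lastErr error
	out := make([]int, 0, len(lhs))
	for i := range lhs {
		v, err := op(lhs[i], rhs[i])
		if err != nil {
			lastErr = err
			continue // skip this pair, keep going
		}
		out = append(out, v)
	}
	return out, lastErr
}

func main() {
	div := func(a, b int) (int, error) {
		if b == 0 {
			return 0, errors.New("division by zero")
		}
		return a / b, nil
	}
	res, err := combineAll([]int{6, 8, 9}, []int{2, 0, 3}, div)
	fmt.Println(res, err) // [3 3] division by zero
}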
+ var lastErr error for i, ls := range lhs { sig := lhsh[i].signature @@ -2263,7 +2604,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * fl, fr = fr, fl hl, hr = hr, hl } - floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) + floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr) + if err != nil { + lastErr = err + } switch { case returnBool: if keep { @@ -2275,8 +2619,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) - if returnBool { - metric = enh.DropMetricName(metric) + if !ev.enableDelayedNameRemoval && returnBool { + metric = metric.DropMetricName() } insertedSigs, exists := matchedSigs[sig] if matching.Card == parser.CardOneToOne { @@ -2300,12 +2644,13 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } enh.Out = append(enh.Out, Sample{ - Metric: metric, - F: floatValue, - H: histogramValue, + Metric: metric, + F: floatValue, + H: histogramValue, + DropName: returnBool, }) } - return enh.Out + return enh.Out, lastErr } func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string { @@ -2368,7 +2713,8 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. -func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) { + var lastErr error for _, lhsSample := range lhs { lf, rf := lhsSample.F, rhs.V var rh *histogram.FloatHistogram @@ -2379,7 +2725,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lf, rf = rf, lf lh, rh = rh, lh } - float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) + float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh) + if err != nil { + lastErr = err + } // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. if op.IsComparisonOperator() && swap { @@ -2398,16 +2747,15 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lhsSample.F = float lhsSample.H = histogram if shouldDropMetricName(op) || returnBool { - lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) + if !ev.enableDelayedNameRemoval { + lhsSample.Metric = lhsSample.Metric.DropMetricName() + } + lhsSample.DropName = true } enh.Out = append(enh.Out, lhsSample) } } - return enh.Out -} - -func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels() + return enh.Out, lastErr } // scalarBinop evaluates a binary operation between two Scalars. @@ -2444,213 +2792,137 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. 
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { switch op { case parser.ADD: if hlhs != nil && hrhs != nil { - // The histogram being added must have the larger schema - // code (i.e. the higher resolution). - if hrhs.Schema >= hlhs.Schema { - return 0, hlhs.Copy().Add(hrhs).Compact(0), true + res, err := hlhs.Copy().Add(hrhs) + if err != nil { + return 0, nil, false, err } - return 0, hrhs.Copy().Add(hlhs).Compact(0), true + return 0, res.Compact(0), true, nil } - return lhs + rhs, nil, true + return lhs + rhs, nil, true, nil case parser.SUB: if hlhs != nil && hrhs != nil { - // The histogram being subtracted must have the larger schema - // code (i.e. the higher resolution). - if hrhs.Schema >= hlhs.Schema { - return 0, hlhs.Copy().Sub(hrhs).Compact(0), true + res, err := hlhs.Copy().Sub(hrhs) + if err != nil { + return 0, nil, false, err } - return 0, hrhs.Copy().Mul(-1).Add(hlhs).Compact(0), true + return 0, res.Compact(0), true, nil } - return lhs - rhs, nil, true + return lhs - rhs, nil, true, nil case parser.MUL: if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Mul(rhs), true + return 0, hlhs.Copy().Mul(rhs), true, nil } if hlhs == nil && hrhs != nil { - return 0, hrhs.Copy().Mul(lhs), true + return 0, hrhs.Copy().Mul(lhs), true, nil } - return lhs * rhs, nil, true + return lhs * rhs, nil, true, nil case parser.DIV: if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Div(rhs), true + return 0, hlhs.Copy().Div(rhs), true, nil } - return lhs / rhs, nil, true + return lhs / rhs, nil, true, nil case parser.POW: - return math.Pow(lhs, rhs), nil, true + return math.Pow(lhs, rhs), nil, true, nil case parser.MOD: - return math.Mod(lhs, rhs), nil, true + return math.Mod(lhs, rhs), nil, true, nil case parser.EQLC: - return lhs, nil, lhs == rhs + return lhs, nil, lhs == rhs, nil case parser.NEQ: - return lhs, nil, lhs != rhs + return lhs, nil, lhs != rhs, nil case parser.GTR: - return lhs, nil, lhs > rhs + return lhs, nil, lhs > rhs, nil case parser.LSS: - return lhs, nil, lhs < rhs + return lhs, nil, lhs < rhs, nil case parser.GTE: - return lhs, nil, lhs >= rhs + return lhs, nil, lhs >= rhs, nil case parser.LTE: - return lhs, nil, lhs <= rhs + return lhs, nil, lhs <= rhs, nil case parser.ATAN2: - return math.Atan2(lhs, rhs), nil, true + return math.Atan2(lhs, rhs), nil, true, nil } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } type groupedAggregation struct { - hasFloat bool // Has at least 1 float64 sample aggregated. - hasHistogram bool // Has at least 1 histogram sample aggregated. - labels labels.Labels floatValue float64 histogramValue *histogram.FloatHistogram floatMean float64 - histogramMean *histogram.FloatHistogram - groupCount int + floatKahanC float64 // "Compensating value" for Kahan summation. + groupCount float64 heap vectorByValueHeap - reverseHeap vectorByReverseValueHeap + + // All bools together for better packing within the struct. + seen bool // Was this output groups seen in the input at this timestamp. + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets. 
+ groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. + incrementalMean bool // True after reverting to incremental calculation of the mean value. } -// aggregation evaluates an aggregation operation on a Vector. The provided grouping labels -// must be sorted. -func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - result := map[uint64]*groupedAggregation{} - orderedResult := []*groupedAggregation{} - var k int64 - if op == parser.TOPK || op == parser.BOTTOMK { - f := param.(float64) - if !convertibleToInt64(f) { - ev.errorf("Scalar value %v overflows int64", f) - } - k = int64(f) - if k < 1 { - return Vector{} - } - } - var q float64 - if op == parser.QUANTILE { - q = param.(float64) - } - var valueLabel string - var recomputeGroupingKey bool - if op == parser.COUNT_VALUES { - valueLabel = param.(string) - if !model.LabelName(valueLabel).IsValid() { - ev.errorf("invalid label name %q", valueLabel) - } - if !without { - // We're changing the grouping labels so we have to ensure they're still sorted - // and we have to flag to recompute the grouping key. Considering the count_values() - // operator is less frequently used than other aggregations, we're fine having to - // re-compute the grouping key on each step for this case. - grouping = append(grouping, valueLabel) - slices.Sort(grouping) - recomputeGroupingKey = true - } +// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix. +// These functions produce one output series for each group specified in the expression, with just the labels from `by(...)`. +// outputMatrix should be already populated with grouping labels; groups is one-to-one with outputMatrix. +// seriesToResult maps inputMatrix indexes to outputMatrix indexes. +func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix, outputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper) annotations.Annotations { + op := e.Op + var annos annotations.Annotations + for i := range groups { + groups[i].seen = false } - var buf []byte - for si, s := range vec { - metric := s.Metric - - if op == parser.COUNT_VALUES { - enh.resetBuilder(metric) - enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) - metric = enh.lb.Labels() - - // We've changed the metric so we have to recompute the grouping key. - recomputeGroupingKey = true - } - - // We can use the pre-computed grouping key unless grouping labels have changed. - var groupingKey uint64 - if !recomputeGroupingKey { - groupingKey = seriesHelper[si].groupingKey - } else { - groupingKey, buf = generateGroupingKey(metric, grouping, without, buf) + for si := range inputMatrix { + f, h, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) + if !ok { + continue } - group, ok := result[groupingKey] - // Add a new group if it doesn't exist. - if !ok { - var m labels.Labels - enh.resetBuilder(metric) - switch { - case without: - enh.lb.Del(grouping...) - enh.lb.Del(labels.MetricName) - m = enh.lb.Labels() - case len(grouping) > 0: - enh.lb.Keep(grouping...) 
- m = enh.lb.Labels() - default: - m = labels.EmptyLabels() - } - newAgg := &groupedAggregation{ - labels: m, - floatValue: s.F, - floatMean: s.F, - groupCount: 1, - } - switch { - case s.H == nil: - newAgg.hasFloat = true - case op == parser.SUM: - newAgg.histogramValue = s.H.Copy() - newAgg.hasHistogram = true - case op == parser.AVG: - newAgg.histogramMean = s.H.Copy() - newAgg.hasHistogram = true - case op == parser.STDVAR || op == parser.STDDEV: - newAgg.groupCount = 0 - } - - result[groupingKey] = newAgg - orderedResult = append(orderedResult, newAgg) - - inputVecLen := int64(len(vec)) - resultSize := k - switch { - case k > inputVecLen: - resultSize = inputVecLen - case k == 0: - resultSize = 1 + group := &groups[seriesToResult[si]] + // Initialize this group if it's the first time we've seen it. + if !group.seen { + *group = groupedAggregation{ + seen: true, + floatValue: f, + floatMean: f, + incompatibleHistograms: false, + groupCount: 1, } switch op { - case parser.STDVAR, parser.STDDEV: - result[groupingKey].floatValue = 0 - case parser.TOPK, parser.QUANTILE: - result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize) - result[groupingKey].heap[0] = Sample{ - F: s.F, - Metric: s.Metric, - } - case parser.BOTTOMK: - result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize) - result[groupingKey].reverseHeap[0] = Sample{ - F: s.F, - Metric: s.Metric, + case parser.AVG, parser.SUM: + if h == nil { + group.hasFloat = true + } else { + group.histogramValue = h.Copy() + group.hasHistogram = true } + case parser.STDVAR, parser.STDDEV: + group.floatValue = 0 + case parser.QUANTILE: + group.heap = make(vectorByValueHeap, 1) + group.heap[0] = Sample{F: f} case parser.GROUP: - result[groupingKey].floatValue = 1 + group.floatValue = 1 } continue } + if group.incompatibleHistograms { + continue + } + switch op { case parser.SUM: - if s.H != nil { + if h != nil { group.hasHistogram = true if group.histogramValue != nil { - // The histogram being added must have - // an equal or larger schema. - if s.H.Schema >= group.histogramValue.Schema { - group.histogramValue.Add(s.H) - } else { - group.histogramValue = s.H.Copy().Add(group.histogramValue) + _, err := group.histogramValue.Add(h) + if err != nil { + handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true } } // Otherwise the aggregation contained floats @@ -2658,24 +2930,27 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // point in copying the histogram in that case. } else { group.hasFloat = true - group.floatValue += s.F + group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC) } case parser.AVG: group.groupCount++ - if s.H != nil { + if h != nil { group.hasHistogram = true - if group.histogramMean != nil { - left := s.H.Copy().Div(float64(group.groupCount)) - right := group.histogramMean.Copy().Div(float64(group.groupCount)) - // The histogram being added/subtracted must have - // an equal or larger schema. 
- if s.H.Schema >= group.histogramMean.Schema { - toAdd := right.Mul(-1).Add(left) - group.histogramMean.Add(toAdd) - } else { - toAdd := left.Sub(right) - group.histogramMean = toAdd.Add(group.histogramMean) + if group.histogramValue != nil { + left := h.Copy().Div(group.groupCount) + right := group.histogramValue.Copy().Div(group.groupCount) + toAdd, err := left.Sub(right) + if err != nil { + handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue + } + _, err = group.histogramValue.Add(toAdd) + if err != nil { + handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue } } // Otherwise the aggregation contained floats @@ -2683,14 +2958,30 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // point in copying the histogram in that case. } else { group.hasFloat = true + if !group.incrementalMean { + newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC) + if !math.IsInf(newV, 0) { + // The sum doesn't overflow, so we propagate it to the + // group struct and continue with the regular + // calculation of the mean value. + group.floatValue, group.floatKahanC = newV, newC + break + } + // If we are here, we know that the sum _would_ overflow. So + // instead of continue to sum up, we revert to incremental + // calculation of the mean value from here on. + group.incrementalMean = true + group.floatMean = group.floatValue / (group.groupCount - 1) + group.floatKahanC /= group.groupCount - 1 + } if math.IsInf(group.floatMean, 0) { - if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) { + if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) { // The `floatMean` and `s.F` values are `Inf` of the same sign. They // can't be subtracted, but the value of `floatMean` is correct // already. break } - if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { + if !math.IsInf(f, 0) && !math.IsNaN(f) { // At this stage, the mean is an infinite. If the added // value is neither an Inf or a Nan, we can keep that mean // value. @@ -2700,48 +2991,165 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without break } } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount) + currentMean := group.floatMean + group.floatKahanC + group.floatMean, group.floatKahanC = kahanSumInc( + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + f/group.groupCount-currentMean/group.groupCount, + group.floatMean, + group.floatKahanC, + ) } case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. case parser.MAX: - if group.floatValue < s.F || math.IsNaN(group.floatValue) { - group.floatValue = s.F + if group.floatValue < f || math.IsNaN(group.floatValue) { + group.floatValue = f } case parser.MIN: - if group.floatValue > s.F || math.IsNaN(group.floatValue) { - group.floatValue = s.F + if group.floatValue > f || math.IsNaN(group.floatValue) { + group.floatValue = f } - case parser.COUNT, parser.COUNT_VALUES: + case parser.COUNT: group.groupCount++ case parser.STDVAR, parser.STDDEV: - if s.H == nil { // Ignore native histograms. + if h == nil { // Ignore native histograms. 
group.groupCount++ - delta := s.F - group.floatMean - group.floatMean += delta / float64(group.groupCount) - group.floatValue += delta * (s.F - group.floatMean) + delta := f - group.floatMean + group.floatMean += delta / group.groupCount + group.floatValue += delta * (f - group.floatMean) } + case parser.QUANTILE: + group.heap = append(group.heap, Sample{F: f}) + + default: + panic(fmt.Errorf("expected aggregation operator but got %q", op)) + } + } + + // Construct the output matrix from the aggregated groups. + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + + for ri, aggr := range groups { + if !aggr.seen { + continue + } + switch op { + case parser.AVG: + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) + continue + } + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: + aggr.histogramValue = aggr.histogramValue.Compact(0) + case aggr.incrementalMean: + aggr.floatValue = aggr.floatMean + aggr.floatKahanC + default: + aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount + } + + case parser.COUNT: + aggr.floatValue = aggr.groupCount + + case parser.STDVAR: + aggr.floatValue /= aggr.groupCount + + case parser.STDDEV: + aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount) + + case parser.QUANTILE: + aggr.floatValue = quantile(q, aggr.heap) + + case parser.SUM: + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) + continue + } + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: + aggr.histogramValue.Compact(0) + default: + aggr.floatValue += aggr.floatKahanC + } + default: + // For other aggregations, we already have the right value. + } + + ss := &outputMatrix[ri] + addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps) + ss.DropName = inputMatrix[ri].DropName + } + + return annos +} + +// aggregationK evaluates topk, bottomk, limitk, or limit_ratio at one timestep on inputMatrix. +// Output that has the same labels as the input, but just k of them per group. +// seriesToResult maps inputMatrix indexes to groups indexes. +// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio. +// For a range query, aggregates output in the seriess map. +func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { + op := e.Op + var s Sample + var annos annotations.Annotations + // Used to short-cut the loop for LIMITK if we already collected k elements for every group + groupsRemaining := len(groups) + for i := range groups { + groups[i].seen = false + } + +seriesLoop: + for si := range inputMatrix { + f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) + if !ok { + continue + } + s = Sample{Metric: inputMatrix[si].Metric, F: f, DropName: inputMatrix[si].DropName} + + group := &groups[seriesToResult[si]] + // Initialize this group if it's the first time we've seen it. + if !group.seen { + // LIMIT_RATIO is a special case, as we may not add this very sample to the heap, + // while we also don't know the final size of it. 
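Editor's note: the kahanSumInc calls threaded through the sum and avg cases above compensate for floating-point round-off. The helper itself is defined elsewhere in the package; the sketch below follows the standard Neumaier formulation and is an assumption about its exact shape.

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to a running sum while accumulating the rounding
// error in a compensation term c, so sum+c is more accurate than sum alone.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0 // an infinite sum cannot be compensated
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc // low-order bits of inc were lost
	default:
		c += (inc - t) + sum // low-order bits of sum were lost
	}
	return t, c
}

func main() {
	var sum, c float64
	for _, v := range []float64{1e16, 1, -1e16} {
		sum, c = kahanSumInc(v, sum, c)
	}
	fmt.Println(sum+c, sum) // 1 0: the compensation term recovers the lost 1
}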
+ if op == parser.LIMIT_RATIO { + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 0), + } + if ratiosampler.AddRatioSample(r, &s) { + heap.Push(&group.heap, &s) + } + } else { + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 1, k), + } + group.heap[0] = s + } + continue + } + + switch op { case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. switch { - case int64(len(group.heap)) < k: - heap.Push(&group.heap, &Sample{ - F: s.F, - Metric: s.Metric, - }) + case len(group.heap) < k: + heap.Push(&group.heap, &s) case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is bigger than the previous smallest element - overwrite that. - group.heap[0] = Sample{ - F: s.F, - Metric: s.Metric, - } + group.heap[0] = s if k > 1 { heap.Fix(&group.heap, 0) // Maintain the heap invariant. } @@ -2750,103 +3158,215 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.BOTTOMK: // We build a heap of up to k elements, with the biggest element at heap[0]. switch { - case int64(len(group.reverseHeap)) < k: - heap.Push(&group.reverseHeap, &Sample{ - F: s.F, - Metric: s.Metric, - }) - case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)): + case len(group.heap) < k: + heap.Push((*vectorByReverseValueHeap)(&group.heap), &s) + case group.heap[0].F > s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is smaller than the previous biggest element - overwrite that. - group.reverseHeap[0] = Sample{ - F: s.F, - Metric: s.Metric, - } + group.heap[0] = s if k > 1 { - heap.Fix(&group.reverseHeap, 0) // Maintain the heap invariant. + heap.Fix((*vectorByReverseValueHeap)(&group.heap), 0) // Maintain the heap invariant. } } - case parser.QUANTILE: - group.heap = append(group.heap, s) + case parser.LIMITK: + if len(group.heap) < k { + heap.Push(&group.heap, &s) + } + // LIMITK optimization: early break if we've added K elem to _every_ group, + // especially useful for large timeseries where the user is exploring labels via e.g. + // limitk(10, my_metric) + if !group.groupAggrComplete && len(group.heap) == k { + group.groupAggrComplete = true + groupsRemaining-- + if groupsRemaining == 0 { + break seriesLoop + } + } + + case parser.LIMIT_RATIO: + if ratiosampler.AddRatioSample(r, &s) { + heap.Push(&group.heap, &s) + } default: panic(fmt.Errorf("expected aggregation operator but got %q", op)) } } - // Construct the result Vector from the aggregated groups. - for _, aggr := range orderedResult { - switch op { - case parser.AVG: - if aggr.hasFloat && aggr.hasHistogram { - // We cannot aggregate histogram sample with a float64 sample. - // TODO(zenador): Issue warning when plumbing is in place. - continue - } - if aggr.hasHistogram { - aggr.histogramValue = aggr.histogramMean.Compact(0) - } else { - aggr.floatValue = aggr.floatMean - } - - case parser.COUNT, parser.COUNT_VALUES: - aggr.floatValue = float64(aggr.groupCount) - - case parser.STDVAR: - aggr.floatValue /= float64(aggr.groupCount) - - case parser.STDDEV: - aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) + // Construct the result from the aggregated groups. 
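Editor's note: the topk branch above keeps at most k elements with the smallest on top (group.heap[0]), so each incoming sample only has to beat the current minimum, and a final reverse sort yields descending order; bottomk reuses the same storage through a reversed comparator. A reduced sketch of the bounded min-heap:

package main

import (
	"container/heap"
	"fmt"
)

type minHeap []float64

func (h minHeap) Len() int           { return len(h) }
func (h minHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x any)        { *h = append(*h, x.(float64)) }
func (h *minHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// topK keeps the k largest values seen so far in a size-k min-heap.
func topK(k int, vals []float64) []float64 {
	var h minHeap
	for _, v := range vals {
		switch {
		case h.Len() < k:
			heap.Push(&h, v)
		case h[0] < v:
			h[0] = v        // replace the smallest of the current top-k
			heap.Fix(&h, 0) // restore the heap invariant
		}
	}
	return h
}

func main() {
	fmt.Println(topK(3, []float64{5, 1, 9, 3, 7, 2})) // the 3 largest, in heap order
}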
+ numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + var mat Matrix + if ev.endTimestamp == ev.startTimestamp { + mat = make(Matrix, 0, len(groups)) + } + add := func(lbls labels.Labels, f float64, dropName bool) { + // If this could be an instant query, add directly to the matrix so the result is in consistent order. + if ev.endTimestamp == ev.startTimestamp { + mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName}) + } else { + // Otherwise the results are added into seriess elements. + hash := lbls.Hash() + ss, ok := seriess[hash] + if !ok { + ss = Series{Metric: lbls, DropName: dropName} + } + addToSeries(&ss, enh.Ts, f, nil, numSteps) + seriess[hash] = ss + } + } + for _, aggr := range groups { + if !aggr.seen { + continue + } + switch op { case parser.TOPK: // The heap keeps the lowest value on top, so reverse it. if len(aggr.heap) > 1 { sort.Sort(sort.Reverse(aggr.heap)) } for _, v := range aggr.heap { - enh.Out = append(enh.Out, Sample{ - Metric: v.Metric, - F: v.F, - }) + add(v.Metric, v.F, v.DropName) } - continue // Bypass default append. case parser.BOTTOMK: // The heap keeps the highest value on top, so reverse it. - if len(aggr.reverseHeap) > 1 { - sort.Sort(sort.Reverse(aggr.reverseHeap)) + if len(aggr.heap) > 1 { + sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap))) } - for _, v := range aggr.reverseHeap { - enh.Out = append(enh.Out, Sample{ - Metric: v.Metric, - F: v.F, - }) + for _, v := range aggr.heap { + add(v.Metric, v.F, v.DropName) } - continue // Bypass default append. - case parser.QUANTILE: - aggr.floatValue = quantile(q, aggr.heap) - - case parser.SUM: - if aggr.hasFloat && aggr.hasHistogram { - // We cannot aggregate histogram sample with a float64 sample. - // TODO(zenador): Issue warning when plumbing is in place. - continue + case parser.LIMITK, parser.LIMIT_RATIO: + for _, v := range aggr.heap { + add(v.Metric, v.F, v.DropName) } - if aggr.hasHistogram { - aggr.histogramValue.Compact(0) + } + } + + return mat, annos +} + +// aggregationCountValues evaluates count_values on vec. +// Outputs as many series per group as there are values in the input. +func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + type groupCount struct { + labels labels.Labels + count int + } + result := map[uint64]*groupCount{} + + var buf []byte + for _, s := range vec { + enh.resetBuilder(s.Metric) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + metric := enh.lb.Labels() + + // Considering the count_values() + // operator is less frequently used than other aggregations, we're fine having to + // re-compute the grouping key on each step for this case. + var groupingKey uint64 + groupingKey, buf = generateGroupingKey(metric, grouping, e.Without, buf) + + group, ok := result[groupingKey] + // Add a new group if it doesn't exist. + if !ok { + result[groupingKey] = &groupCount{ + labels: generateGroupingLabels(enh, metric, e.Without, grouping), + count: 1, } - default: - // For other aggregations, we already have the right value. + continue } + group.count++ + } + + // Construct the result Vector from the aggregated groups. 
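Editor's note: aggregationCountValues above boils down to three steps: format each sample's value into an extra label, regroup with that label included, and count occurrences per group. A reduced model of the counting core:

package main

import (
	"fmt"
	"strconv"
)

// countValues counts how often each distinct value occurs, keyed by the
// synthetic label the real implementation injects via enh.lb.Set.
func countValues(valueLabel string, samples []float64) map[string]int {
	counts := map[string]int{}
	for _, f := range samples {
		key := valueLabel + "=" + strconv.FormatFloat(f, 'f', -1, 64)
		counts[key]++
	}
	return counts
}

func main() {
	fmt.Println(countValues("value", []float64{1, 1, 2, 0.5}))
	// map[value=0.5:1 value=1:2 value=2:1]
}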
+ for _, aggr := range result { enh.Out = append(enh.Out, Sample{ Metric: aggr.labels, - F: aggr.floatValue, - H: aggr.histogramValue, + F: float64(aggr.count), }) } - return enh.Out + return enh.Out, nil +} + +func (ev *evaluator) cleanupMetricLabels(v parser.Value) { + if v.Type() == parser.ValueTypeMatrix { + mat := v.(Matrix) + for i := range mat { + if mat[i].DropName { + mat[i].Metric = mat[i].Metric.DropMetricName() + } + } + if mat.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } else if v.Type() == parser.ValueTypeVector { + vec := v.(Vector) + for i := range vec { + if vec[i].DropName { + vec[i].Metric = vec[i].Metric.DropMetricName() + } + } + if vec.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } +} + +func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) { + if h == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{T: ts, F: f}) + return + } + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: h}) +} + +func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogram.FloatHistogram, b bool) { + switch { + case len(series.Floats) > 0 && series.Floats[0].T == ts: + f = series.Floats[0].F + series.Floats = series.Floats[1:] // Move input vectors forward + case len(series.Histograms) > 0 && series.Histograms[0].T == ts: + h = series.Histograms[0].H + series.Histograms = series.Histograms[1:] + default: + return f, h, false + } + return f, h, true +} + +// handleAggregationError adds the appropriate annotation based on the aggregation error. +func handleAggregationError(err error, e *parser.AggregateExpr, metricName string, annos *annotations.Annotations) { + pos := e.Expr.PositionRange() + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } +} + +// handleVectorBinopError returns the appropriate annotation based on the vector binary operation error. +func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotations { + if err == nil { + return nil + } + metricName := "" + pos := e.PositionRange() + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } + return nil } // groupingKey builds and returns the grouping key for the given metric and @@ -2864,6 +3384,21 @@ func generateGroupingKey(metric labels.Labels, grouping []string, without bool, return metric.HashForLabels(buf, grouping...) } +func generateGroupingLabels(enh *EvalNodeHelper, metric labels.Labels, without bool, grouping []string) labels.Labels { + enh.resetBuilder(metric) + switch { + case without: + enh.lb.Del(grouping...) + enh.lb.Del(labels.MetricName) + return enh.lb.Labels() + case len(grouping) > 0: + enh.lb.Keep(grouping...) + return enh.lb.Labels() + default: + return labels.EmptyLabels() + } +} + // btos returns 1 if b is true, 0 otherwise. 
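Editor's note: generateGroupingLabels above centralizes the by/without logic that was previously inlined in aggregation. A standalone version against the labels package (runnable within this module; the function name is ours):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// groupingLabels mirrors the three cases above: `without` drops the listed
// labels plus __name__, `by` keeps only the listed ones, and no grouping
// collapses everything into the empty label set.
func groupingLabels(metric labels.Labels, without bool, grouping []string) labels.Labels {
	b := labels.NewBuilder(metric)
	switch {
	case without:
		b.Del(grouping...)
		b.Del(labels.MetricName)
	case len(grouping) > 0:
		b.Keep(grouping...)
	default:
		return labels.EmptyLabels()
	}
	return b.Labels()
}

func main() {
	m := labels.FromStrings("__name__", "http_requests_total", "job", "api", "instance", "a:9090")
	fmt.Println(groupingLabels(m, false, []string{"job"}))     // {job="api"}
	fmt.Println(groupingLabels(m, true, []string{"instance"})) // {job="api"}
}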
func btos(b bool) float64 { if b { @@ -2913,6 +3448,8 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr { // PreprocessExpr wraps all possible step invariant parts of the given expression with // StepInvariantExpr. It also resolves the preprocessors. func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { + detectHistogramStatsDecoding(expr) + isStepInvariant := preprocessExprHelper(expr, start, end) if isStepInvariant { return newStepInvariantExpr(expr) @@ -3047,8 +3584,106 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) { }) } +// detectHistogramStatsDecoding modifies the expression by setting the +// SkipHistogramBuckets field in those vector selectors for which it is safe to +// return only histogram statistics (sum and count), excluding histogram spans +// and buckets. The function can be treated as an optimization and is not +// required for correctness. +func detectHistogramStatsDecoding(expr parser.Expr) { + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + if n, ok := node.(*parser.BinaryExpr); ok { + detectHistogramStatsDecoding(n.LHS) + detectHistogramStatsDecoding(n.RHS) + return fmt.Errorf("stop") + } + + n, ok := (node).(*parser.VectorSelector) + if !ok { + return nil + } + + for _, p := range path { + call, ok := p.(*parser.Call) + if !ok { + continue + } + if call.Func.Name == "histogram_count" || call.Func.Name == "histogram_sum" { + n.SkipHistogramBuckets = true + break + } + if call.Func.Name == "histogram_quantile" || call.Func.Name == "histogram_fraction" { + n.SkipHistogramBuckets = false + break + } + } + return fmt.Errorf("stop") + }) +} + func makeInt64Pointer(val int64) *int64 { valp := new(int64) *valp = val return valp } + +// RatioSampler allows unit-testing (previously: Randomizer). +type RatioSampler interface { + // Return this sample "offset" between [0.0, 1.0] + sampleOffset(ts int64, sample *Sample) float64 + AddRatioSample(r float64, sample *Sample) bool +} + +// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic" +// value in [0.0, 1.0]. +type HashRatioSampler struct{} + +var ratiosampler RatioSampler = NewHashRatioSampler() + +func NewHashRatioSampler() *HashRatioSampler { + return &HashRatioSampler{} +} + +func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 { + const ( + float64MaxUint64 = float64(math.MaxUint64) + ) + return float64(sample.Metric.Hash()) / float64MaxUint64 +} + +func (s *HashRatioSampler) AddRatioSample(ratioLimit float64, sample *Sample) bool { + // If ratioLimit >= 0: add sample if sampleOffset is lesser than ratioLimit + // + // 0.0 ratioLimit 1.0 + // [---------|--------------------------] + // [#########...........................] + // + // e.g.: + // sampleOffset==0.3 && ratioLimit==0.4 + // 0.3 < 0.4 ? --> add sample + // + // Else if ratioLimit < 0: add sample if rand() return the "complement" of ratioLimit>=0 case + // (loosely similar behavior to negative array index in other programming languages) + // + // 0.0 1+ratioLimit 1.0 + // [---------|--------------------------] + // [.........###########################] + // + // e.g.: + // sampleOffset==0.3 && ratioLimit==-0.6 + // 0.3 >= 0.4 ? 
--> don't add sample + sampleOffset := s.sampleOffset(sample.T, sample) + return (ratioLimit >= 0 && sampleOffset < ratioLimit) || + (ratioLimit < 0 && sampleOffset >= (1.0+ratioLimit)) +} + +type histogramStatsSeries struct { + storage.Series +} + +func newHistogramStatsSeries(series storage.Series) *histogramStatsSeries { + return &histogramStatsSeries{Series: series} +} + +func (s histogramStatsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + return NewHistogramStatsIterator(s.Series.Iterator(it)) +} diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go new file mode 100644 index 0000000000..cb501b2fdf --- /dev/null +++ b/promql/engine_internal_test.go @@ -0,0 +1,82 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "errors" + "testing" + + "github.com/go-kit/log" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/util/annotations" +) + +func TestRecoverEvaluatorRuntime(t *testing.T) { + var output []interface{} + logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { + output = append(output, keyvals...) + return nil + })) + ev := &evaluator{logger: logger} + + expr, _ := parser.ParseExpr("sum(up)") + + var err error + + defer func() { + require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") + require.Contains(t, output, "sum(up)") + }() + defer ev.recover(expr, nil, &err) + + // Cause a runtime panic. + var a []int + a[123] = 1 +} + +func TestRecoverEvaluatorError(t *testing.T) { + ev := &evaluator{logger: log.NewNopLogger()} + var err error + + e := errors.New("custom error") + + defer func() { + require.EqualError(t, err, e.Error()) + }() + defer ev.recover(nil, nil, &err) + + panic(e) +} + +func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { + ev := &evaluator{logger: log.NewNopLogger()} + var err error + var ws annotations.Annotations + + warnings := annotations.New().Add(errors.New("custom warning")) + e := errWithWarnings{ + err: errors.New("custom error"), + warnings: warnings, + } + + defer func() { + require.EqualError(t, err, e.Error()) + require.Equal(t, warnings, ws, "wrong warning message") + }() + defer ev.recover(nil, &ws, &err) + + panic(e) +} diff --git a/promql/engine_test.go b/promql/engine_test.go index 54567d154c..19bd781445 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -11,62 +11,72 @@ // See the License for the specific language governing permissions and // limitations under the License. 
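Editor's note: the HashRatioSampler defined above drives limit_ratio. It maps every series deterministically into [0.0, 1.0] by hashing its labels, so a given series is consistently kept or dropped for a given ratio, and ratios r and r-1 select complementary subsets. A sketch using FNV over a label string as a stand-in for the engine's labels.Hash():

package main

import (
	"fmt"
	"hash/fnv"
	"math"
)

// sampleOffset deterministically places a series in [0.0, 1.0].
func sampleOffset(labelsKey string) float64 {
	h := fnv.New64a()
	h.Write([]byte(labelsKey))
	return float64(h.Sum64()) / float64(math.MaxUint64)
}

// addRatioSample implements the keep/drop rule described above: a positive
// ratio keeps the low end of the range, a negative ratio keeps the
// complementary high end.
func addRatioSample(ratio float64, labelsKey string) bool {
	off := sampleOffset(labelsKey)
	return (ratio >= 0 && off < ratio) || (ratio < 0 && off >= 1.0+ratio)
}

func main() {
	for _, s := range []string{`{job="a"}`, `{job="b"}`, `{job="c"}`} {
		// The two calls always disagree: limit_ratio(0.5, x) and
		// limit_ratio(-0.5, x) partition the series between them.
		fmt.Println(s, addRatioSample(0.5, s), addRatioSample(-0.5, s))
	}
}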
-package promql +package promql_test import ( "context" "errors" "fmt" "math" - "os" "sort" + "strconv" + "strings" + "sync" "testing" "time" - "github.com/go-kit/log" - - "github.com/prometheus/prometheus/tsdb/tsdbutil" - "github.com/stretchr/testify/require" - "go.uber.org/goleak" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" + "github.com/prometheus/prometheus/util/teststorage" + "github.com/prometheus/prometheus/util/testutil" +) + +const ( + env = "query execution" + defaultLookbackDelta = 5 * time.Minute + defaultEpsilon = 0.000001 // Relative error allowed for sample values. ) func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) + // Enable experimental functions testing + parser.EnableExperimentalFunctions = true + testutil.TolerantVerifyLeak(m) } func TestQueryConcurrency(t *testing.T) { maxConcurrency := 10 - dir, err := os.MkdirTemp("", "test_concurrency") - require.NoError(t, err) - defer os.RemoveAll(dir) - queryTracker := NewActiveQueryTracker(dir, maxConcurrency, nil) - t.Cleanup(queryTracker.Close) - - opts := EngineOpts{ + queryTracker := promql.NewActiveQueryTracker(t.TempDir(), maxConcurrency, nil) + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 100 * time.Second, ActiveQueryTracker: queryTracker, } + engine := promqltest.NewTestEngineWithOpts(t, opts) - engine := NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) - defer cancelCtx() + t.Cleanup(cancelCtx) block := make(chan struct{}) processing := make(chan struct{}) done := make(chan int) - defer close(done) + t.Cleanup(func() { + close(done) + }) f := func(context.Context) error { select { @@ -81,9 +91,14 @@ func TestQueryConcurrency(t *testing.T) { return nil } + var wg sync.WaitGroup for i := 0; i < maxConcurrency; i++ { - q := engine.newTestQuery(f) - go q.Exec(ctx) + q := engine.NewTestQuery(f) + wg.Add(1) + go func() { + q.Exec(ctx) + wg.Done() + }() select { case <-processing: // Expected. @@ -92,8 +107,12 @@ func TestQueryConcurrency(t *testing.T) { } } - q := engine.newTestQuery(f) - go q.Exec(ctx) + q := engine.NewTestQuery(f) + wg.Add(1) + go func() { + q.Exec(ctx) + wg.Done() + }() select { case <-processing: @@ -116,20 +135,37 @@ func TestQueryConcurrency(t *testing.T) { for i := 0; i < maxConcurrency; i++ { block <- struct{}{} } + + wg.Wait() +} + +// contextDone returns an error if the context was canceled or timed out. 
+func contextDone(ctx context.Context, env string) error { + if err := ctx.Err(); err != nil { + switch { + case errors.Is(err, context.Canceled): + return promql.ErrQueryCanceled(env) + case errors.Is(err, context.DeadlineExceeded): + return promql.ErrQueryTimeout(env) + default: + return err + } + } + return nil } func TestQueryTimeout(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 5 * time.Millisecond, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { time.Sleep(100 * time.Millisecond) return contextDone(ctx, "test statement execution") }) @@ -137,20 +173,20 @@ func TestQueryTimeout(t *testing.T) { res := query.Exec(ctx) require.Error(t, res.Err, "expected timeout error but got none") - var e ErrQueryTimeout - require.True(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err) + var e promql.ErrQueryTimeout + require.ErrorAs(t, res.Err, &e, "expected timeout error but got: %s", res.Err) } -const errQueryCanceled = ErrQueryCanceled("test statement execution") +const errQueryCanceled = promql.ErrQueryCanceled("test statement execution") func TestQueryCancel(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -158,13 +194,13 @@ func TestQueryCancel(t *testing.T) { block := make(chan struct{}) processing := make(chan struct{}) - query1 := engine.newTestQuery(func(ctx context.Context) error { + query1 := engine.NewTestQuery(func(ctx context.Context) error { processing <- struct{}{} <-block return contextDone(ctx, "test statement execution") }) - var res *Result + var res *promql.Result go func() { res = query1.Exec(ctx) @@ -180,7 +216,7 @@ func TestQueryCancel(t *testing.T) { require.Equal(t, errQueryCanceled, res.Err) // Canceling a query before starting it must have no effect. 
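Editor's note: the contextDone helper above maps context cancellation onto the engine's string-based error types, which is what lets the tests compare them by value (const errQueryCanceled = promql.ErrQueryCanceled(...)). A minimal model of the pattern; the exact message wording is an assumption:

package main

import (
	"context"
	"errors"
	"fmt"
)

// String-based error types are comparable, so they can be declared as
// constants and checked with plain equality in tests.
type ErrQueryCanceled string

func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) }

type ErrQueryTimeout string

func (e ErrQueryTimeout) Error() string { return fmt.Sprintf("query timed out in %s", string(e)) }

func classify(ctx context.Context, env string) error {
	switch err := ctx.Err(); {
	case errors.Is(err, context.Canceled):
		return ErrQueryCanceled(env)
	case errors.Is(err, context.DeadlineExceeded):
		return ErrQueryTimeout(env)
	default:
		return err
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(classify(ctx, "test statement execution"))
}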
- query2 := engine.newTestQuery(func(ctx context.Context) error { + query2 := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) @@ -194,15 +230,15 @@ type errQuerier struct { err error } -func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet { +func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet { return errSeriesSet{err: q.err} } -func (*errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (*errQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { +func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (*errQuerier) Close() error { return nil } @@ -212,21 +248,21 @@ type errSeriesSet struct { err error } -func (errSeriesSet) Next() bool { return false } -func (errSeriesSet) At() storage.Series { return nil } -func (e errSeriesSet) Err() error { return e.err } -func (e errSeriesSet) Warnings() storage.Warnings { return nil } +func (errSeriesSet) Next() bool { return false } +func (errSeriesSet) At() storage.Series { return nil } +func (e errSeriesSet) Err() error { return e.err } +func (e errSeriesSet) Warnings() annotations.Annotations { return nil } func TestQueryError(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) - errStorage := ErrStorage{errors.New("storage error")} - queryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + engine := promqltest.NewTestEngineWithOpts(t, opts) + errStorage := promql.ErrStorage{errors.New("storage error")} + queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &errQuerier{err: errStorage}, nil }) ctx, cancelCtx := context.WithCancel(context.Background()) @@ -237,21 +273,21 @@ func TestQueryError(t *testing.T) { res := vectorQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") - require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") + require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match") matrixQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo[1m]", time.Unix(1, 0)) require.NoError(t, err) res = matrixQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") - require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") + require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match") } type noopHintRecordingQueryable struct { hints []*storage.SelectHints } -func (h *noopHintRecordingQueryable) Querier(context.Context, int64, int64) (storage.Querier, error) { +func (h *noopHintRecordingQueryable) Querier(int64, int64) (storage.Querier, error) { return &hintRecordingQuerier{Querier: &errQuerier{}, h: h}, nil } @@ -261,13 +297,13 @@ type hintRecordingQuerier struct { h *noopHintRecordingQueryable } -func (h *hintRecordingQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (h *hintRecordingQuerier) Select(ctx context.Context, sortSeries bool, hints 
*storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { h.h.hints = append(h.h.hints, hints) - return h.Querier.Select(sortSeries, hints, matchers...) + return h.Querier.Select(ctx, sortSeries, hints, matchers...) } func TestSelectHintsSetCorrectly(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, @@ -289,283 +325,284 @@ func TestSelectHintsSetCorrectly(t *testing.T) { { query: "foo", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000}, + {Start: 5001, End: 10000}, }, }, { query: "foo @ 15", start: 10000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000}, + {Start: 10001, End: 15000}, }, }, { query: "foo @ 1", start: 10000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000}, + {Start: -3999, End: 1000}, }, }, { query: "foo[2m]", start: 200000, expected: []*storage.SelectHints{ - {Start: 80000, End: 200000, Range: 120000}, + {Start: 80001, End: 200000, Range: 120000}, }, }, { query: "foo[2m] @ 180", start: 200000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 300", start: 200000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000}, + {Start: 180001, End: 300000, Range: 120000}, }, }, { query: "foo[2m] @ 60", start: 200000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000}, + {Start: -59999, End: 60000, Range: 120000}, }, }, { query: "foo[2m] offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 200 offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: -40000, End: 80000, Range: 120000}, + {Start: -39999, End: 80000, Range: 120000}, }, }, { query: "foo[2m:1s]", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Step: 1000}, + {Start: 175001, End: 300000, Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 165000, End: 290000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 290000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 155000, End: 280000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 280000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the 
enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "foo", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 5000, End: 20000, Step: 1000}, + {Start: 5001, End: 20000, Step: 1000}, }, }, { query: "foo @ 15", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000, Step: 1000}, + {Start: 10001, End: 15000, Step: 1000}, }, }, { query: "foo @ 1", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000, Step: 1000}, + {Start: -3999, End: 1000, Step: 1000}, }, }, { query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 180000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 180001, End: 300000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, + {Start: -59999, End: 60000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m])", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 80001, End: 500000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 380000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "rate", Step: 1000}, + {Start: 175001, End: 500000, Func: "rate", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 500000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 490000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: 
"count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 480000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "sum by (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, }, }, { query: "sum without (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, }, }, { query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, expected: []*storage.SelectHints{ - {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, + {Start: 9001, End: 10000, Func: "avg_over_time", Range: 1000}, }, }, { query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, + {Start: 5001, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, }, }, { query: "(max by (dim1) (foo))[5s:1s]", start: 10000, expected: []*storage.SelectHints{ - {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, + {Start: 1, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, }, }, { query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, expected: []*storage.SelectHints{ - {Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000}, - {Start: 95000, End: 120000, Func: "max", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: "sum", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: 
"max", By: true, Step: 5000}, }, }, { query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 48001, End: 50000, Step: 1000, Func: "rate", Range: 2000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 655001, End: 780000, Step: 1000, Func: "rate"}, }, }, { // Hints are based on the inner most subquery timestamp. query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, expected: []*storage.SelectHints{ - {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: -149999, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, { // Hints are based on the inner most subquery timestamp. 
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, expected: []*storage.SelectHints{ - {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: 2800001, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, } { t.Run(tc.query, func(t *testing.T) { - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) hintsRecorder := &noopHintRecordingQueryable{} var ( - query Query + query promql.Query err error ) ctx := context.Background() + if tc.end == 0 { query, err = engine.NewInstantQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start)) } else { @@ -573,7 +610,7 @@ func TestSelectHintsSetCorrectly(t *testing.T) { } require.NoError(t, err) - res := query.Exec(ctx) + res := query.Exec(context.Background()) require.NoError(t, res.Err) require.Equal(t, tc.expected, hintsRecorder.hints) @@ -582,13 +619,13 @@ func TestSelectHintsSetCorrectly(t *testing.T) { } func TestEngineShutdown(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) block := make(chan struct{}) @@ -601,13 +638,13 @@ func TestEngineShutdown(t *testing.T) { <-block return contextDone(ctx, "test statement execution") } - query1 := engine.newTestQuery(f) + query1 := engine.NewTestQuery(f) // Stopping the engine must cancel the base context. While executing queries is // still possible, their context is canceled from the beginning and execution should // terminate immediately. - var res *Result + var res *promql.Result go func() { res = query1.Exec(ctx) processing <- struct{}{} @@ -621,7 +658,7 @@ func TestEngineShutdown(t *testing.T) { require.Error(t, res.Err, "expected error on shutdown during query but got none") require.Equal(t, errQueryCanceled, res.Err) - query2 := engine.newTestQuery(func(context.Context) error { + query2 := engine.NewTestQuery(func(context.Context) error { require.FailNow(t, "reached query execution unexpectedly") return nil }) @@ -631,20 +668,16 @@ func TestEngineShutdown(t *testing.T) { res2 := query2.Exec(ctx) require.Error(t, res2.Err, "expected error on querying with canceled context but got none") - var e ErrQueryCanceled - require.True(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err) + var e promql.ErrQueryCanceled + require.ErrorAs(t, res2.Err, &e, "expected cancellation error but got: %s", res2.Err) } func TestEngineEvalStmtTimestamps(t *testing.T) { - test, err := NewTest(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric 1 2 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) cases := []struct { Query string @@ -657,13 +690,13 @@ load 10s // Instant queries. 
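// The rewrite applied throughout this file replaces the old promql.Test
// harness with a loaded storage plus an engine constructed per test. A rough
// sketch of the assumed pattern, using the helpers referenced above:
//
//	storage := promqltest.LoadedStorage(t, "load 10s\n  metric 1 2")
//	t.Cleanup(func() { storage.Close() })
//	engine := newTestEngine(t) // package-local helper
//	qry, err := engine.NewInstantQuery(context.Background(), storage, nil, "metric", time.Unix(1, 0))
//	require.NoError(t, err)
//	res := qry.Exec(context.Background())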
{ Query: "1", - Result: Scalar{V: 1, T: 1000}, + Result: promql.Scalar{V: 1, T: 1000}, Start: time.Unix(1, 0), }, { Query: "metric", - Result: Vector{ - Sample{ + Result: promql.Vector{ + promql.Sample{ F: 1, T: 1000, Metric: labels.FromStrings("__name__", "metric"), @@ -673,9 +706,9 @@ load 10s }, { Query: "metric[20s]", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -684,9 +717,9 @@ load 10s // Range queries. { Query: "1", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.EmptyLabels(), }, }, @@ -696,9 +729,9 @@ load 10s }, { Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -708,9 +741,9 @@ load 10s }, { Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -727,15 +760,16 @@ load 10s for i, c := range cases { t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) { var err error - var qry Query + var qry promql.Query + engine := newTestEngine(t) if c.Interval == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) if c.ShouldError { require.Error(t, res.Err, "expected error for the query %q", c.Query) return @@ -748,18 +782,15 @@ load 10s } func TestQueryStatistics(t *testing.T) { - test, err := NewTest(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metricWith1SampleEvery10Seconds 1+1x100 metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100 metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100 metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100 + metricWith1HistogramEvery10Seconds {{schema:1 count:5 sum:20 buckets:[1 2 1 1]}}+{{schema:1 count:10 sum:5 buckets:[1 2 3 4]}}x100 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) cases := []struct { Query string @@ -798,6 +829,15 @@ load 10s 21000: 1, }, }, + { + Query: "metricWith1HistogramEvery10Seconds", + Start: time.Unix(21, 0), + PeakSamples: 13, + TotalSamples: 13, // 1 histogram HPoint of size 13 / 10 seconds + TotalSamplesPerStep: stats.TotalSamplesPerStep{ + 21000: 13, + }, + }, { // timestamp function has a special handling. 
Query: "timestamp(metricWith1SampleEvery10Seconds)", @@ -808,6 +848,15 @@ load 10s 21000: 1, }, }, + { + Query: "timestamp(metricWith1HistogramEvery10Seconds)", + Start: time.Unix(21, 0), + PeakSamples: 2, + TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds + TotalSamplesPerStep: stats.TotalSamplesPerStep{ + 21000: 1, + }, + }, { Query: "metricWith1SampleEvery10Seconds", Start: time.Unix(22, 0), @@ -881,26 +930,42 @@ load 10s }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]", + Query: "metricWith1HistogramEvery10Seconds[60s]", + Start: time.Unix(201, 0), + PeakSamples: 78, + TotalSamples: 78, // 1 histogram (size 13 HPoint) / 10 seconds * 60 seconds + TotalSamplesPerStep: stats.TotalSamplesPerStep{ + 201000: 78, + }, + }, + { + Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 10, - TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 60/5 (using 59s so we always return 6 samples - // as if we run a query on 00 looking back 60 seconds we will return 7 samples; - // see next test). + TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 24, }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 11, - TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) + 2 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as + // max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples. TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 26, }, }, + { + Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", + Start: time.Unix(201, 0), + PeakSamples: 78, + TotalSamples: 312, // (1 histogram (size 13) / 10 seconds * 60 seconds) * 4 + TotalSamplesPerStep: stats.TotalSamplesPerStep{ + 201000: 312, + }, + }, { Query: "metricWith1SampleEvery10Seconds[60s] @ 30", Start: time.Unix(201, 0), @@ -910,6 +975,15 @@ load 10s 201000: 4, }, }, + { + Query: "metricWith1HistogramEvery10Seconds[60s] @ 30", + Start: time.Unix(201, 0), + PeakSamples: 52, + TotalSamples: 52, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series + TotalSamplesPerStep: stats.TotalSamplesPerStep{ + 201000: 52, + }, + }, { Query: "sum(max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))", Start: time.Unix(201, 0), @@ -922,7 +996,7 @@ load 10s { Query: "sum by (b) (max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))", Start: time.Unix(201, 0), - PeakSamples: 8, + PeakSamples: 7, TotalSamples: 12, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 3 series TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 12, @@ -1038,13 +1112,42 @@ load 10s }, }, { - // timestamp function as a special handling + Query: `metricWith1HistogramEvery10Seconds`, + Start: time.Unix(204, 0), + End: time.Unix(223, 0), + Interval: 5 * time.Second, + PeakSamples: 52, + TotalSamples: 52, // 1 histogram (size 13 HPoint) per query * 4 steps + TotalSamplesPerStep: stats.TotalSamplesPerStep{ + 204000: 13, // aligned to the step time, not the sample time + 209000: 13, + 214000: 13, + 219000: 13, + }, + }, + { + // timestamp function has a special handling Query: 
"timestamp(metricWith1SampleEvery10Seconds)", Start: time.Unix(201, 0), End: time.Unix(220, 0), Interval: 5 * time.Second, PeakSamples: 5, - TotalSamples: 4, // (1 sample / 10 seconds) * 4 steps + TotalSamples: 4, // 1 sample per query * 4 steps + TotalSamplesPerStep: stats.TotalSamplesPerStep{ + 201000: 1, + 206000: 1, + 211000: 1, + 216000: 1, + }, + }, + { + // timestamp function has a special handling + Query: "timestamp(metricWith1HistogramEvery10Seconds)", + Start: time.Unix(201, 0), + End: time.Unix(220, 0), + Interval: 5 * time.Second, + PeakSamples: 5, + TotalSamples: 4, // 1 sample per query * 4 steps TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 1, 206000: 1, @@ -1194,25 +1297,22 @@ load 10s }, } - engine := test.QueryEngine() - engine.enablePerStepStats = true - origMaxSamples := engine.maxSamplesPerQuery for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - opts := NewPrometheusQueryOpts(true, 0) - engine.maxSamplesPerQuery = origMaxSamples + opts := promql.NewPrometheusQueryOpts(true, 0) + engine := promqltest.NewTestEngine(t, true, 0, promqltest.DefaultMaxSamplesPerQuery) runQuery := func(expErr error) *stats.Statistics { var err error - var qry Query + var qry promql.Query if c.Interval == 0 { - qry, err = engine.NewInstantQuery(test.context, test.Queryable(), opts, c.Query, c.Start) + qry, err = engine.NewInstantQuery(context.Background(), storage, opts, c.Query, c.Start) } else { - qry, err = engine.NewRangeQuery(test.context, test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(context.Background(), storage, opts, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.Equal(t, expErr, res.Err) return qry.Stats() @@ -1227,24 +1327,20 @@ load 10s if c.SkipMaxCheck { return } - engine.maxSamplesPerQuery = stats.Samples.PeakSamples - 1 - runQuery(ErrTooManySamples(env)) + engine = promqltest.NewTestEngine(t, true, 0, stats.Samples.PeakSamples-1) + runQuery(promql.ErrTooManySamples(env)) }) } } func TestMaxQuerySamples(t *testing.T) { - test, err := NewTest(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric 1+1x100 bigmetric{a="1"} 1+1x100 bigmetric{a="2"} 1+1x100 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) // These test cases should be touching the limit exactly (hence no exceeding). // Exceeding the limit will be tested by doing -1 to the MaxSamples. @@ -1332,70 +1428,70 @@ load 10s }, { // The peak samples in memory is during the first evaluation: - // - Subquery takes 22 samples, 11 for each bigmetric, - // - Result is calculated per series where the series samples is buffered, hence 11 more here. + // - Subquery takes 22 samples, 11 for each bigmetric, but samples on the left bound won't be evaluated. + // - Result is calculated per series where the series samples is buffered, hence 10 more here. // - The result of two series is added before the last series buffer is discarded, so 2 more here. - // Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series). + // Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series). // The subquery samples and the buffer is discarded before duplicating. 
 			Query:      `rate(bigmetric[10s:1s] @ 10)`,
-			MaxSamples: 35,
+			MaxSamples: 34,
 			Start:      time.Unix(0, 0),
 			End:        time.Unix(10, 0),
 			Interval:   5 * time.Second,
 		},
 		{
 			// Here the reasoning is same as above. But LHS and RHS are done one after another.
-			// So while one of them takes 35 samples at peak, we need to hold the 2 sample
+			// So while one of them takes 34 samples at peak, we need to hold the 2 sample
 			// result of the other till then.
 			Query:      `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`,
-			MaxSamples: 37,
+			MaxSamples: 36,
 			Start:      time.Unix(0, 0),
 			End:        time.Unix(10, 0),
 			Interval:   5 * time.Second,
 		},
 		{
-			// Sample as above but with only 1 part as step invariant.
+			// Same as above but with only 1 part as step invariant.
 			// Here the peak is caused by the non-step invariant part as it touches more time range.
 			// Hence at peak it is 2*21 (subquery from 0s to 20s)
-			// + 11 (buffer of a series per evaluation)
+			// + 10 (buffer of a series per evaluation)
 			// + 6 (result from 2 series at 3 eval times).
 			Query:      `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`,
-			MaxSamples: 59,
+			MaxSamples: 58,
 			Start:      time.Unix(10, 0),
 			End:        time.Unix(20, 0),
 			Interval:   5 * time.Second,
 		},
 		{
 			// Nested subquery.
-			// We saw that innermost rate takes 35 samples which is still the peak
+			// We saw that innermost rate takes 34 samples which is still the peak
 			// since the other two subqueries just duplicate the result.
 			Query:      `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`,
-			MaxSamples: 35,
+			MaxSamples: 34,
 			Start:      time.Unix(10, 0),
 		},
 		{
 			// Nested subquery.
-			// Now the outmost subquery produces more samples than inner most rate.
+			// Now the outermost subquery produces more samples than the innermost rate.
 			Query:      `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`,
 			MaxSamples: 36,
 			Start:      time.Unix(10, 0),
 		},
 	}

-	engine := test.QueryEngine()
 	for _, c := range cases {
 		t.Run(c.Query, func(t *testing.T) {
+			engine := newTestEngine(t)
 			testFunc := func(expError error) {
 				var err error
-				var qry Query
+				var qry promql.Query
 				if c.Interval == 0 {
-					qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start)
+					qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start)
 				} else {
-					qry, err = engine.NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
+					qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval)
 				}
 				require.NoError(t, err)
-				res := qry.Exec(test.Context())
+				res := qry.Exec(context.Background())
 				stats := qry.Stats()
 				require.Equal(t, expError, res.Err)
 				require.NotNil(t, stats)
@@ -1405,18 +1501,19 @@ load 10s
 			}

 			// Within limit.
-			engine.maxSamplesPerQuery = c.MaxSamples
+			engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples)
 			testFunc(nil)

 			// Exceeding limit.
- engine.maxSamplesPerQuery = c.MaxSamples - 1 - testFunc(ErrTooManySamples(env)) + engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples-1) + testFunc(promql.ErrTooManySamples(env)) }) } } func TestAtModifier(t *testing.T) { - test, err := NewTest(t, ` + engine := newTestEngine(t) + storage := promqltest.LoadedStorage(t, ` load 10s metric{job="1"} 0+1x1000 metric{job="2"} 0+2x1000 @@ -1427,11 +1524,7 @@ load 10s load 1ms metric_ms 0+1x10000 `) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + t.Cleanup(func() { storage.Close() }) lbls1 := labels.FromStrings("__name__", "metric", "job", "1") lbls2 := labels.FromStrings("__name__", "metric", "job", "2") @@ -1441,7 +1534,7 @@ load 1ms lblsneg := labels.FromStrings("__name__", "metric_neg") // Add some samples with negative timestamp. - db := test.TSDB() + db := storage.DB app := db.Appender(context.Background()) ref, err := app.Append(0, lblsneg, -1000000, 1000) require.NoError(t, err) @@ -1464,137 +1557,137 @@ load 1ms { // Time of the result is the evaluation time. query: `metric_neg @ 0`, start: 100, - result: Vector{ - Sample{F: 1, T: 100000, Metric: lblsneg}, + result: promql.Vector{ + promql.Sample{F: 1, T: 100000, Metric: lblsneg}, }, }, { query: `metric_neg @ -200`, start: 100, - result: Vector{ - Sample{F: 201, T: 100000, Metric: lblsneg}, + result: promql.Vector{ + promql.Sample{F: 201, T: 100000, Metric: lblsneg}, }, }, { query: `metric{job="2"} @ 50`, start: -2, end: 2, interval: 1, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, Metric: lbls2, }, }, }, { // Timestamps for matrix selector does not depend on the evaluation time. 
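// The expected matrices below each lost their oldest point relative to the
// previous expectations: with the left-open window, a range selector [r]
// fixed at time t selects (t-r, t], so a sample that is exactly r old falls
// outside the window. For example:
//
//	// metric[20s] @ 300 selects (280s, 300s]:
//	//   old: {F: 28, T: 280000} {F: 29, T: 290000} {F: 30, T: 300000}
//	//   new:                    {F: 29, T: 290000} {F: 30, T: 300000}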
query: "metric[20s] @ 300", start: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, - Series{ - Floats: []FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, }, { query: `metric_neg[2s] @ 0`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, }, { query: `metric_neg[3s] @ -500`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, }, { query: `metric_ms[3ms] @ 2.345`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, }, { query: "metric[100s:25s] @ 300", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, Metric: lbls1, }, - Series{ - Floats: []FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, }, { query: "metric_neg[50s:25s] @ 0", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, }, { query: "metric_neg[50s:25s] @ -100", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, Metric: lblsneg, }, }, }, { query: `metric_ms[100ms:25ms] @ 2.345`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, Metric: lblsms, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ 100))`, start: 50, end: 80, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 80000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 80000}}, Metric: lblstopk3, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ 
5000))`, start: 50, end: 80, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, Metric: lblstopk2, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ end()))`, start: 70, end: 100, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, Metric: lblstopk3, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ start()))`, start: 100, end: 130, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, Metric: lblstopk3, }, }, @@ -1603,9 +1696,9 @@ load 1ms // The trick here is that the query range should be > lookback delta. query: `timestamp(metric_timestamp @ 3600)`, start: 0, end: 7 * 60, interval: 60, - result: Matrix{ - Series{ - Floats: []FPoint{ + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{ {F: 3600, T: 0}, {F: 3600, T: 60 * 1000}, {F: 3600, T: 2 * 60 * 1000}, @@ -1615,7 +1708,8 @@ load 1ms {F: 3600, T: 6 * 60 * 1000}, {F: 3600, T: 7 * 60 * 1000}, }, - Metric: labels.EmptyLabels(), + Metric: labels.EmptyLabels(), + DropName: true, }, }, }, @@ -1628,87 +1722,29 @@ load 1ms } start, end, interval := time.Unix(c.start, 0), time.Unix(c.end, 0), time.Duration(c.interval)*time.Second var err error - var qry Query + var qry promql.Query if c.end == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.query, start) + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.query, start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.query, start, end, interval) + qry, err = engine.NewRangeQuery(context.Background(), storage, nil, c.query, start, end, interval) } require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) - if expMat, ok := c.result.(Matrix); ok { + if expMat, ok := c.result.(promql.Matrix); ok { sort.Sort(expMat) - sort.Sort(res.Value.(Matrix)) + sort.Sort(res.Value.(promql.Matrix)) } - require.Equal(t, c.result, res.Value, "query %q failed", c.query) + testutil.RequireEqual(t, c.result, res.Value, "query %q failed", c.query) }) } } -func TestRecoverEvaluatorRuntime(t *testing.T) { - var output []interface{} - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = append(output, keyvals...) - return nil - })) - ev := &evaluator{logger: logger} - - expr, _ := parser.ParseExpr("sum(up)") - - var err error - - defer func() { - require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") - require.Contains(t, output, "sum(up)") - }() - defer ev.recover(expr, nil, &err) - - // Cause a runtime panic. 
- var a []int - //nolint:govet - a[123] = 1 -} - -func TestRecoverEvaluatorError(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} - var err error - - e := errors.New("custom error") - - defer func() { - require.EqualError(t, err, e.Error()) - }() - defer ev.recover(nil, nil, &err) - - panic(e) -} - -func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} - var err error - var ws storage.Warnings - - warnings := storage.Warnings{errors.New("custom warning")} - e := errWithWarnings{ - err: errors.New("custom error"), - warnings: warnings, - } - - defer func() { - require.EqualError(t, err, e.Error()) - require.Equal(t, warnings, ws, "wrong warning message") - }() - defer ev.recover(nil, &ws, &err) - - panic(e) -} - func TestSubquerySelector(t *testing.T) { type caseType struct { Query string - Result Result + Result promql.Result Start time.Time } @@ -1722,11 +1758,11 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { Query: "metric[20s:10s]", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1736,11 +1772,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s]", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1750,11 +1786,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 2s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1764,11 +1800,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 6s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1778,11 +1814,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 4s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1792,11 +1828,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 5s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1806,11 +1842,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: 
"metric[20s:5s] offset 6s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1820,11 +1856,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 7s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1843,11 +1879,11 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { // Normal selector. Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1857,11 +1893,11 @@ func TestSubquerySelector(t *testing.T) { }, { // Default step. Query: `http_requests{group=~"pro.*",instance="0"}[5m:]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1871,11 +1907,11 @@ func TestSubquerySelector(t *testing.T) { }, { // Checking if high offset (>LookbackDelta) is being taken care of. 
Query: `http_requests{group=~"pro.*",instance="0"}[5m:] offset 20m`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1885,24 +1921,28 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `rate(http_requests[1m])[15s:5s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, }, - Series{ - Floats: []FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + DropName: true, }, - Series{ - Floats: []FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, }, - Series{ - Floats: []FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, }, }, nil, @@ -1911,11 +1951,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1925,11 +1965,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests)[40s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1939,11 +1979,11 @@ func TestSubquerySelector(t *testing.T) { }, { 
Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1955,72 +1995,26 @@ func TestSubquerySelector(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - test, err := NewTest(t, tst.loadString) - require.NoError(t, err) - defer test.Close() + engine := newTestEngine(t) + storage := promqltest.LoadedStorage(t, tst.loadString) + t.Cleanup(func() { storage.Close() }) - require.NoError(t, test.Run()) - engine := test.QueryEngine() for _, c := range tst.cases { t.Run(c.Query, func(t *testing.T) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.Equal(t, c.Result.Err, res.Err) - mat := res.Value.(Matrix) + mat := res.Value.(promql.Matrix) sort.Sort(mat) - require.Equal(t, c.Result.Value, mat) + testutil.RequireEqual(t, c.Result.Value, mat) }) } }) } } -func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) { - test, err := NewTest(t, ` -load 1m - metric 0+1x1000 -`) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) - - query := "timestamp(metric)" - start := time.Unix(0, 0) - end := time.Unix(61, 0) - interval := time.Second - - // We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. - expectedPoints := []FPoint{} - - for t := 0; t <= 59; t++ { - expectedPoints = append(expectedPoints, FPoint{F: 0, T: int64(t * 1000)}) - } - - expectedPoints = append( - expectedPoints, - FPoint{F: 60, T: 60_000}, - FPoint{F: 60, T: 61_000}, - ) - - expectedResult := Matrix{ - Series{ - Floats: expectedPoints, - Metric: labels.EmptyLabels(), - }, - } - - qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, query, start, end, interval) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - require.Equal(t, expectedResult, res.Value) -} - type FakeQueryLogger struct { closed bool logs []interface{} @@ -2044,25 +2038,25 @@ func (f *FakeQueryLogger) Log(l ...interface{}) error { } func TestQueryLogger_basic(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) queryExec := func() { ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) res := query.Exec(ctx) require.NoError(t, res.Err) } - // Query works without query log initialized. + // promql.Query works without query log initialized. 
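// The assertions that follow exercise the engine's logger lifecycle. A sketch
// of the assumed contract (f2 here is illustrative, not from this test):
//
//	engine.SetQueryLogger(f1)  // f1 now receives one log call per executed query
//	engine.SetQueryLogger(f2)  // the previous logger f1 is closed; f2 takes over
//	engine.SetQueryLogger(nil) // f2 is closed; query logging is disabled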
queryExec() f1 := NewFakeQueryLogger() @@ -2074,7 +2068,7 @@ func TestQueryLogger_basic(t *testing.T) { l := len(f1.logs) queryExec() - require.Equal(t, 2*l, len(f1.logs)) + require.Len(t, f1.logs, 2*l) // Test that we close the query logger when unsetting it. require.False(t, f1.closed, "expected f1 to be open, got closed") @@ -2095,21 +2089,21 @@ func TestQueryLogger_basic(t *testing.T) { } func TestQueryLogger_fields(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) @@ -2124,22 +2118,22 @@ func TestQueryLogger_fields(t *testing.T) { } func TestQueryLogger_error(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() testErr := errors.New("failure") - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return testErr }) @@ -2164,7 +2158,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { expected: &parser.StepInvariantExpr{ Expr: &parser.NumberLiteral{ Val: 123.4567, - PosRange: parser.PositionRange{Start: 0, End: 8}, + PosRange: posrange.PositionRange{Start: 0, End: 8}, }, }, }, @@ -2173,7 +2167,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { expected: &parser.StepInvariantExpr{ Expr: &parser.StringLiteral{ Val: "foo", - PosRange: parser.PositionRange{Start: 0, End: 5}, + PosRange: posrange.PositionRange{Start: 0, End: 5}, }, }, }, @@ -2186,7 +2180,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 3, }, @@ -2196,7 +2190,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 6, End: 9, }, @@ -2213,7 +2207,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 3, }, @@ -2224,7 +2218,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 6, End: 14, }, @@ -2244,7 
+2238,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 8, }, @@ -2255,7 +2249,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 11, End: 19, }, @@ -2273,7 +2267,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 4, }, @@ -2293,7 +2287,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, "a", "b"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 11, }, @@ -2312,13 +2306,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 13, End: 24, }, }, Grouping: []string{"foo"}, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 25, }, @@ -2334,14 +2328,14 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 13, End: 29, }, Timestamp: makeInt64Pointer(10000), }, Grouping: []string{"foo"}, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 30, }, @@ -2361,13 +2355,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric1"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 4, End: 21, }, Timestamp: makeInt64Pointer(10000), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 22, }, @@ -2379,13 +2373,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric2"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 29, End: 46, }, Timestamp: makeInt64Pointer(20000), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 25, End: 47, }, @@ -2405,7 +2399,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 11, }, @@ -2422,7 +2416,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 29, End: 40, }, @@ -2432,19 +2426,19 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 49, }, }, - PosRange: parser.PositionRange{ + PosRange: 
posrange.PositionRange{ Start: 24, End: 50, }, }, Param: &parser.NumberLiteral{ Val: 5, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 21, End: 22, }, }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 16, End: 51, }, @@ -2457,7 +2451,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { expected: &parser.Call{ Func: parser.MustGetFunction("time"), Args: parser.Expressions{}, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 6, }, @@ -2472,7 +2466,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 14, }, @@ -2492,7 +2486,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 14, }, @@ -2517,13 +2511,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 4, End: 23, }, Timestamp: makeInt64Pointer(20000), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 24, }, @@ -2554,7 +2548,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 19, End: 33, }, @@ -2563,7 +2557,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 37, }, }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 14, End: 38, }, @@ -2573,7 +2567,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 56, }, }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 57, }, @@ -2593,7 +2587,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 27, }, @@ -2615,7 +2609,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 11, }, @@ -2643,7 +2637,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 1, End: 4, }, @@ -2656,14 +2650,14 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), }, Timestamp: makeInt64Pointer(1234000), - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 7, End: 27, }, }, }, }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 28, }, @@ -2694,18 +2688,18 @@ func 
TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 8, End: 19, }, Timestamp: makeInt64Pointer(10000), }}, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 4, End: 20, }, }}, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 21, }, @@ -2727,13 +2721,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric1"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 8, End: 25, }, Timestamp: makeInt64Pointer(10000), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 4, End: 26, }, @@ -2745,19 +2739,19 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric2"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 33, End: 50, }, Timestamp: makeInt64Pointer(20000), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 29, End: 52, }, }, }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 52, }, @@ -2772,7 +2766,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 13, }, @@ -2789,7 +2783,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 11, }, @@ -2809,7 +2803,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 4, }, @@ -2830,7 +2824,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 4, }, @@ -2849,7 +2843,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 11, }, @@ -2871,7 +2865,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 11, }, @@ -2901,7 +2895,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { LabelMatchers: []*labels.Matcher{ parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 6, End: 17, }, @@ -2912,20 +2906,20 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { Op: parser.MUL, LHS: &parser.NumberLiteral{ Val: 3, - PosRange: 
parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 21, End: 22, }, }, RHS: &parser.NumberLiteral{ Val: 1024, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 25, End: 29, }, }, }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 20, End: 30, }, @@ -2933,7 +2927,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, }, }, - PosRange: parser.PositionRange{ + PosRange: posrange.PositionRange{ Start: 0, End: 31, }, @@ -2945,7 +2939,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { t.Run(test.input, func(t *testing.T) { expr, err := parser.ParseExpr(test.input) require.NoError(t, err) - expr = PreprocessExpr(expr, startTime, endTime) + expr = promql.PreprocessExpr(expr, startTime, endTime) if test.outputTest { require.Equal(t, test.input, expr.String(), "error on input '%s'", test.input) } @@ -2955,1076 +2949,189 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { } func TestEngineOptsValidation(t *testing.T) { - ctx := context.Background() cases := []struct { - opts EngineOpts + opts promql.EngineOpts query string fail bool expError error }{ { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ 100", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ 100", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ 100)", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ 100)", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ 100)", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ 100)", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ start()", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ start()", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ start())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ start())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ start())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ start())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ end()", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ end()", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ end())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ end())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ 
end())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ end())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "metric @ 100", }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "rate(metric[1m] @ start())", }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "rate(metric[1h:1m] @ end())", }, { - opts: EngineOpts{EnableNegativeOffset: false}, - query: "metric offset -1s", fail: true, expError: ErrValidationNegativeOffsetDisabled, + opts: promql.EngineOpts{EnableNegativeOffset: false}, + query: "metric offset -1s", fail: true, expError: promql.ErrValidationNegativeOffsetDisabled, }, { - opts: EngineOpts{EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableNegativeOffset: true}, query: "metric offset -1s", }, { - opts: EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, query: "metric @ 100 offset -2m", }, { - opts: EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, query: "metric offset -2m @ 100", }, } for _, c := range cases { - eng := NewEngine(c.opts) - _, err1 := eng.NewInstantQuery(ctx, nil, nil, c.query, time.Unix(10, 0)) - _, err2 := eng.NewRangeQuery(ctx, nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) + eng := promqltest.NewTestEngineWithOpts(t, c.opts) + _, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0)) + _, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { require.Equal(t, c.expError, err1) require.Equal(t, c.expError, err2) } else { - require.Nil(t, err1) - require.Nil(t, err2) + require.NoError(t, err1) + require.NoError(t, err2) } } } -func TestRangeQuery(t *testing.T) { - cases := []struct { - Name string - Load string - Query string - Result parser.Value - Start time.Time - End time.Time - Interval time.Duration +func TestEngine_Close(t *testing.T) { + t.Run("nil engine", func(t *testing.T) { + var ng *promql.Engine + require.NoError(t, ng.Close()) + }) + + t.Run("non-nil engine", func(t *testing.T) { + ng := promql.NewEngine(promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 0, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: nil, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: false, + LookbackDelta: 0, + EnableDelayedNameRemoval: true, + }) + require.NoError(t, ng.Close()) + }) +} + +func TestInstantQueryWithRangeVectorSelector(t *testing.T) { + engine := newTestEngine(t) + + baseT := timestamp.Time(0) + storage := promqltest.LoadedStorage(t, ` + load 1m + some_metric{env="1"} 0+1x4 + some_metric{env="2"} 0+2x4 + some_metric_with_stale_marker 0 1 stale 3 + `) + t.Cleanup(func() { require.NoError(t, storage.Close()) }) + + testCases := map[string]struct { + expr string + expected promql.Matrix + ts time.Time }{ - { - Name: "sum_over_time with all values", - Load: `load 30s - bar 0 1 10 100 1000`, - Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: 
time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 60 * time.Second, - }, - { - Name: "sum_over_time with trailing values", - Load: `load 30s - bar 0 1 10 100 1000 0 0 0 0`, - Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 60 * time.Second, - }, - { - Name: "sum_over_time with all values long", - Load: `load 30s - bar 0 1 10 100 1000 10000 100000 1000000 10000000`, - Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, - Metric: labels.EmptyLabels(), + "matches series with points in range": { + expr: "some_metric[2m]", + ts: baseT.Add(2 * time.Minute), + expected: promql.Matrix{ + { + Metric: labels.FromStrings("__name__", "some_metric", "env", "1"), + Floats: []promql.FPoint{ + {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, + {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 2}, + }, }, - }, - Start: time.Unix(0, 0), - End: time.Unix(240, 0), - Interval: 60 * time.Second, - }, - { - Name: "sum_over_time with all values random", - Load: `load 30s - bar 5 17 42 2 7 905 51`, - Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, - Metric: labels.EmptyLabels(), + { + Metric: labels.FromStrings("__name__", "some_metric", "env", "2"), + Floats: []promql.FPoint{ + {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 2}, + {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4}, + }, }, }, - Start: time.Unix(0, 0), - End: time.Unix(180, 0), - Interval: 60 * time.Second, }, - { - Name: "metric query", - Load: `load 30s - metric 1+1x4`, - Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings("__name__", "metric"), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, + "matches no series": { + expr: "some_nonexistent_metric[1m]", + ts: baseT, + expected: promql.Matrix{}, }, - { - Name: "metric query with trailing values", - Load: `load 30s - metric 1+1x8`, - Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings("__name__", "metric"), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, + "no samples in range": { + expr: "some_metric[1m]", + ts: baseT.Add(20 * time.Minute), + expected: promql.Matrix{}, }, - { - Name: "short-circuit", - Load: `load 30s - foo{job="1"} 1+1x4 - bar{job="2"} 1+1x4`, - Query: `foo > 2 or bar`, - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings( - "__name__", "bar", - "job", "2", - ), - }, - Series{ - Floats: []FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings( - "__name__", "foo", - "job", "1", - ), + "metric with stale marker": { + expr: "some_metric_with_stale_marker[3m]", + ts: baseT.Add(3 * time.Minute), + expected: promql.Matrix{ + { + Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"), + Floats: []promql.FPoint{ + {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, + {T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3}, + }, }, }, - Start: 
time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - } - for _, c := range cases { - t.Run(c.Name, func(t *testing.T) { - test, err := NewTest(t, c.Load) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) - - qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - require.Equal(t, c.Result, res.Value) - }) - } -} - -func TestNativeHistogramRate(t *testing.T) { - // TODO(beorn7): Integrate histograms into the PromQL testing framework - // and write more tests there. - test, err := NewTest(t, "") - require.NoError(t, err) - defer test.Close() - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - app := test.Storage().Appender(context.TODO()) - for i, h := range tsdbutil.GenerateTestHistograms(100) { - _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil) - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - require.NoError(t, test.Run()) - engine := test.QueryEngine() - - queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) - require.NoError(t, err) - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - actualHistogram := vector[0].H - expectedHistogram := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 8. / 15., - Sum: 1.226666666666667, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - } - require.Equal(t, expectedHistogram, actualHistogram) -} - -func TestNativeFloatHistogramRate(t *testing.T) { - // TODO(beorn7): Integrate histograms into the PromQL testing framework - // and write more tests there. - test, err := NewTest(t, "") - require.NoError(t, err) - defer test.Close() - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - app := test.Storage().Appender(context.TODO()) - for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) { - _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh) - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - require.NoError(t, test.Run()) - engine := test.QueryEngine() - - queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) - require.NoError(t, err) - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - actualHistogram := vector[0].H - expectedHistogram := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 8. 
/ 15., - Sum: 1.226666666666667, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - } - require.Equal(t, expectedHistogram, actualHistogram) -} - -func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - h := &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, }, - NegativeBuckets: []int64{2, 1, -2, 3}, } - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() - - ts := int64(10 * time.Minute / time.Millisecond) - app := test.Storage().Appender(context.TODO()) - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat()) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("histogram_count(%s)", seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if floatHisto { - require.Equal(t, h.ToFloat().Count, vector[0].F) - } else { - require.Equal(t, float64(h.Count), vector[0].F) - } - - queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) - qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + q, err := engine.NewInstantQuery(context.Background(), storage, nil, testCase.expr, testCase.ts) require.NoError(t, err) + defer q.Close() - res = qry.Exec(test.Context()) + res := q.Exec(context.Background()) require.NoError(t, res.Err) - - vector, err = res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if floatHisto { - require.Equal(t, h.ToFloat().Sum, vector[0].F) - } else { - require.Equal(t, h.Sum, vector[0].F) - } + testutil.RequireEqual(t, testCase.expected, res.Value) }) } } -func TestNativeHistogram_HistogramQuantile(t *testing.T) { +func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. - type subCase struct { - quantile string - value float64 - } - cases := []struct { - text string - // Histogram to test. - h *histogram.Histogram - // Different quantiles to test for this histogram. 
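The quantile table being deleted here is the kind of coverage this PR migrates into the shared promqltest script framework. A minimal, hypothetical example of that style, run through the same harness (this assumes promqltest.RunTest accepts an engine argument and reuses this file's newTestEngine helper; the metric, values, and test name are illustrative):

package promql_test

import (
	"testing"

	"github.com/prometheus/prometheus/promql/promqltest"
)

// Sketch: express a native-histogram case as a promqltest script instead of
// a hand-rolled table, and run it through the shared harness.
func TestHistogramScriptSketch(t *testing.T) {
	promqltest.RunTest(t, `
load 1m
	sparse_histogram_series {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 1 4]}}

# histogram_count() returns the observation count; quantile cases migrate the same way.
eval instant at 1m histogram_count(sparse_histogram_series)
	{} 12
`, newTestEngine(t))
}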
- subCases []subCase + histograms []histogram.Histogram + expected histogram.FloatHistogram + expectedAvg histogram.FloatHistogram }{ { - text: "all positive buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - }, - subCases: []subCase{ - { - quantile: "1.0001", - value: math.Inf(1), - }, - { - quantile: "1", - value: 16, - }, - { - quantile: "0.99", - value: 15.759999999999998, - }, - { - quantile: "0.9", - value: 13.600000000000001, - }, - { - quantile: "0.6", - value: 4.799999999999997, - }, - { - quantile: "0.5", - value: 1.6666666666666665, - }, - { // Zero bucket. - quantile: "0.1", - value: 0.0006000000000000001, - }, - { - quantile: "0", - value: 0, - }, - { - quantile: "-1", - value: math.Inf(-1), - }, - }, - }, - { - text: "all negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: []subCase{ - { - quantile: "1.0001", - value: math.Inf(1), - }, - { // Zero bucket. - quantile: "1", - value: 0, - }, - { // Zero bucket. - quantile: "0.99", - value: -6.000000000000048e-05, - }, - { // Zero bucket. - quantile: "0.9", - value: -0.0005999999999999996, - }, - { - quantile: "0.5", - value: -1.6666666666666667, - }, - { - quantile: "0.1", - value: -13.6, - }, - { - quantile: "0", - value: -16, - }, - { - quantile: "-1", - value: math.Inf(-1), - }, - }, - }, - { - text: "both positive and negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: []subCase{ - { - quantile: "1.0001", - value: math.Inf(1), - }, - { - quantile: "1", - value: 16, - }, - { - quantile: "0.99", - value: 15.519999999999996, - }, - { - quantile: "0.9", - value: 11.200000000000003, - }, - { - quantile: "0.7", - value: 1.2666666666666657, - }, - { // Zero bucket. - quantile: "0.55", - value: 0.0006000000000000005, - }, - { // Zero bucket. - quantile: "0.5", - value: 0, - }, - { // Zero bucket. 
- quantile: "0.45", - value: -0.0005999999999999996, - }, - { - quantile: "0.3", - value: -1.266666666666667, - }, - { - quantile: "0.1", - value: -11.2, - }, - { - quantile: "0.01", - value: -15.52, - }, - { - quantile: "0", - value: -16, - }, - { - quantile: "-1", - value: math.Inf(-1), - }, - }, - }, - } - - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) - idx := int64(0) - for _, floatHisto := range []bool{true, false} { - for _, c := range cases { - t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() - ts := idx * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) - } else { - _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - for j, sc := range c.subCases { - t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) { - queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - require.True(t, almostEqual(sc.value, vector[0].F)) - }) - } - idx++ - }) - } - } -} - -func TestNativeHistogram_HistogramFraction(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - type subCase struct { - lower, upper string - value float64 - } - - invariantCases := []subCase{ - { - lower: "42", - upper: "3.1415", - value: 0, - }, - { - lower: "0", - upper: "0", - value: 0, - }, - { - lower: "0.000001", - upper: "0.000001", - value: 0, - }, - { - lower: "42", - upper: "42", - value: 0, - }, - { - lower: "-3.1", - upper: "-3.1", - value: 0, - }, - { - lower: "3.1415", - upper: "NaN", - value: math.NaN(), - }, - { - lower: "NaN", - upper: "42", - value: math.NaN(), - }, - { - lower: "NaN", - upper: "NaN", - value: math.NaN(), - }, - { - lower: "-Inf", - upper: "+Inf", - value: 1, - }, - } - - cases := []struct { - text string - // Histogram to test. - h *histogram.Histogram - // Different ranges to test for this histogram. - subCases []subCase - }{ - { - text: "empty histogram", - h: &histogram.Histogram{}, - subCases: []subCase{ - { - lower: "3.1415", - upper: "42", - value: math.NaN(), - }, - }, - }, - { - text: "all positive buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 - }, - subCases: append([]subCase{ - { - lower: "0", - upper: "+Inf", - value: 1, - }, - { - lower: "-Inf", - upper: "0", - value: 0, - }, - { - lower: "-0.001", - upper: "0", - value: 0, - }, - { - lower: "0", - upper: "0.001", - value: 2. / 12., - }, - { - lower: "0", - upper: "0.0005", - value: 1. / 12., - }, - { - lower: "0.001", - upper: "inf", - value: 10. / 12., - }, - { - lower: "-inf", - upper: "-0.001", - value: 0, - }, - { - lower: "1", - upper: "2", - value: 3. 
/ 12., - }, - { - lower: "1.5", - upper: "2", - value: 1.5 / 12., - }, - { - lower: "1", - upper: "8", - value: 4. / 12., - }, - { - lower: "1", - upper: "6", - value: 3.5 / 12., - }, - { - lower: "1.5", - upper: "6", - value: 2. / 12., - }, - { - lower: "-2", - upper: "-1", - value: 0, - }, - { - lower: "-2", - upper: "-1.5", - value: 0, - }, - { - lower: "-8", - upper: "-1", - value: 0, - }, - { - lower: "-6", - upper: "-1", - value: 0, - }, - { - lower: "-6", - upper: "-1.5", - value: 0, - }, - }, invariantCases...), - }, - { - text: "all negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: append([]subCase{ - { - lower: "0", - upper: "+Inf", - value: 0, - }, - { - lower: "-Inf", - upper: "0", - value: 1, - }, - { - lower: "-0.001", - upper: "0", - value: 2. / 12., - }, - { - lower: "0", - upper: "0.001", - value: 0, - }, - { - lower: "-0.0005", - upper: "0", - value: 1. / 12., - }, - { - lower: "0.001", - upper: "inf", - value: 0, - }, - { - lower: "-inf", - upper: "-0.001", - value: 10. / 12., - }, - { - lower: "1", - upper: "2", - value: 0, - }, - { - lower: "1.5", - upper: "2", - value: 0, - }, - { - lower: "1", - upper: "8", - value: 0, - }, - { - lower: "1", - upper: "6", - value: 0, - }, - { - lower: "1.5", - upper: "6", - value: 0, - }, - { - lower: "-2", - upper: "-1", - value: 3. / 12., - }, - { - lower: "-2", - upper: "-1.5", - value: 1.5 / 12., - }, - { - lower: "-8", - upper: "-1", - value: 4. / 12., - }, - { - lower: "-6", - upper: "-1", - value: 3.5 / 12., - }, - { - lower: "-6", - upper: "-1.5", - value: 2. / 12., - }, - }, invariantCases...), - }, - { - text: "both positive and negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: append([]subCase{ - { - lower: "0", - upper: "+Inf", - value: 0.5, - }, - { - lower: "-Inf", - upper: "0", - value: 0.5, - }, - { - lower: "-0.001", - upper: "0", - value: 2. / 24, - }, - { - lower: "0", - upper: "0.001", - value: 2. / 24., - }, - { - lower: "-0.0005", - upper: "0.0005", - value: 2. / 24., - }, - { - lower: "0.001", - upper: "inf", - value: 10. / 24., - }, - { - lower: "-inf", - upper: "-0.001", - value: 10. / 24., - }, - { - lower: "1", - upper: "2", - value: 3. / 24., - }, - { - lower: "1.5", - upper: "2", - value: 1.5 / 24., - }, - { - lower: "1", - upper: "8", - value: 4. / 24., - }, - { - lower: "1", - upper: "6", - value: 3.5 / 24., - }, - { - lower: "1.5", - upper: "6", - value: 2. / 24., - }, - { - lower: "-2", - upper: "-1", - value: 3. / 24., - }, - { - lower: "-2", - upper: "-1.5", - value: 1.5 / 24., - }, - { - lower: "-8", - upper: "-1", - value: 4. / 24., - }, - { - lower: "-6", - upper: "-1", - value: 3.5 / 24., - }, - { - lower: "-6", - upper: "-1.5", - value: 2. 
/ 24., - }, - }, invariantCases...), - }, - } - idx := int64(0) - for _, floatHisto := range []bool{true, false} { - for _, c := range cases { - t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - engine := test.QueryEngine() - - ts := idx * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) - } else { - _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - for j, sc := range c.subCases { - t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) { - queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName) - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if math.IsNaN(sc.value) { - require.True(t, math.IsNaN(vector[0].F)) - return - } - require.Equal(t, sc.value, vector[0].F) - }) - } - idx++ - }) - } - } -} - -func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - cases := []struct { - histograms []histogram.Histogram - expected histogram.FloatHistogram - expectedAvg histogram.FloatHistogram - }{ - { - histograms: []histogram.Histogram{ + histograms: []histogram.Histogram{ { CounterResetHint: histogram.GaugeType, Schema: 0, - Count: 21, + Count: 25, Sum: 1234.5, ZeroThreshold: 0.001, ZeroCount: 4, @@ -4042,7 +3149,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { { CounterResetHint: histogram.GaugeType, Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4062,7 +3169,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { { CounterResetHint: histogram.GaugeType, Schema: 0, - Count: 36, + Count: 41, Sum: 1111.1, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4089,7 +3196,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { Schema: 0, ZeroThreshold: 0.001, ZeroCount: 14, - Count: 93, + Count: 107, Sum: 4691.2, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 7}, @@ -4106,7 +3213,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { Schema: 0, ZeroThreshold: 0.001, ZeroCount: 3.5, - Count: 23.25, + Count: 26.75, Sum: 1172.8, PositiveSpans: []histogram.Span{ {Offset: 0, Length: 7}, @@ -4125,22 +3232,24 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" seriesNameOverTime := "sparse_histogram_series_over_time" - engine := test.QueryEngine() + engine := newTestEngine(t) ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := 
storage.Appender(context.Background()) + _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42) + require.NoError(t, err) for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) + lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) // Since we mutate h later, we need to create a copy here. + var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) } @@ -4150,7 +3259,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) // Since we mutate h later, we need to create a copy here. if floatHisto { - _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) } @@ -4158,48 +3267,62 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { } require.NoError(t, app.Commit()) - queryAndCheck := func(queryString string, ts int64, exp Vector) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + queryAndCheck := func(queryString string, ts int64, exp promql.Vector) { + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) + require.Empty(t, res.Warnings) vector, err := res.Vector() require.NoError(t, err) - require.Equal(t, exp, vector) + testutil.RequireEqual(t, exp, vector) + } + queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) { + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + require.Equal(t, expWarnings, res.Warnings) } // sum(). queryString := fmt.Sprintf("sum(%s)", seriesName) - queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + + queryString = `sum({idx="0"})` + var annos annotations.Annotations + annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13})) + queryAndCheckAnnotations(queryString, ts, annos) // + operator. queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) for idx := 1; idx < len(c.histograms); idx++ { queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) } - queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) // count(). queryString = fmt.Sprintf("count(%s)", seriesName) - queryAndCheck(queryString, ts, []Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) // avg(). 
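The queryAndCheckAnnotations helper added above asserts that aggregating a float series together with native histograms surfaces a mixed-types warning instead of failing silently. A self-contained sketch of the same pattern, using the exported promqltest helpers shown in this diff (metric names and values are illustrative):

package promql_test

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/promql/promqltest"
)

// Sketch: sum() over one float and one histogram series should carry a
// mixed-floats-and-histograms warning in the result's annotations.
func TestMixedSumWarningSketch(t *testing.T) {
	storage := promqltest.LoadedStorage(t, `
load 1m
	mixed{idx="0"} 1
	mixed{idx="1"} {{schema:0 count:2 sum:3 buckets:[1 1]}}
`)
	t.Cleanup(func() { _ = storage.Close() })

	engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
	qry, err := engine.NewInstantQuery(context.Background(), storage, nil, `sum(mixed)`, time.Unix(60, 0))
	require.NoError(t, err)
	defer qry.Close()

	res := qry.Exec(context.Background())
	require.NoError(t, res.Err)

	// AsStrings renders the annotations with the query text for positions.
	warnings, _ := res.Warnings.AsStrings(`sum(mixed)`, 0, 0)
	require.NotEmpty(t, warnings)
}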
queryString = fmt.Sprintf("avg(%s)", seriesName) - queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) offset := int64(len(c.histograms) - 1) newTs := ts + offset*int64(time.Minute/time.Millisecond) // sum_over_time(). - queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels(), DropName: true}}) // avg_over_time(). - queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels(), DropName: true}}) }) idx0++ } @@ -4217,7 +3340,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { histograms: []histogram.Histogram{ { Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4252,7 +3375,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, expected: histogram.FloatHistogram{ Schema: 0, - Count: 25, + Count: 30, Sum: 1111.1, ZeroThreshold: 0.001, ZeroCount: 2, @@ -4273,7 +3396,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { histograms: []histogram.Histogram{ { Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4308,7 +3431,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, expected: histogram.FloatHistogram{ Schema: 0, - Count: 25, + Count: 30, Sum: 1111.1, ZeroThreshold: 0.001, ZeroCount: 2, @@ -4343,7 +3466,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, { Schema: 0, - Count: 36, + Count: 41, Sum: 2345.6, ZeroThreshold: 0.001, ZeroCount: 5, @@ -4363,7 +3486,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { }, expected: histogram.FloatHistogram{ Schema: 0, - Count: -25, + Count: -30, Sum: -1111.1, ZeroThreshold: 0.001, ZeroCount: -2, @@ -4385,21 +3508,20 @@ func TestNativeHistogram_SubOperator(t *testing.T) { for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) + engine := newTestEngine(t) + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) seriesName := "sparse_histogram_series" - engine := test.QueryEngine() - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) + app := storage.Appender(context.Background()) for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) + lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) // Since we mutate h later, we need to create a copy here. 
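The h.Copy().ToFloat(nil) calls introduced in this and the surrounding hunks reflect a signature change: Histogram.ToFloat now takes an optional destination *FloatHistogram to reuse, and nil requests a fresh allocation. A minimal sketch of both modes:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          0,
		Count:           4,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []int64{2, 0}, // delta-encoded: absolute counts 2, 2
	}

	fh := h.ToFloat(nil) // nil destination: allocate a new FloatHistogram
	fmt.Println(fh.Count, fh.Sum)

	// Pass a previous FloatHistogram back in to reuse its memory.
	fh = h.ToFloat(fh)
	fmt.Println(fh.Count, fh.Sum)
}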
+ var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) } @@ -4407,17 +3529,27 @@ func TestNativeHistogram_SubOperator(t *testing.T) { } require.NoError(t, app.Commit()) - queryAndCheck := func(queryString string, exp Vector) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + queryAndCheck := func(queryString string, exp promql.Vector) { + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) vector, err := res.Vector() require.NoError(t, err) - require.Equal(t, exp, vector) + if len(vector) == len(exp) { + for i, e := range exp { + got := vector[i].H + if got != e.H { + // Error messages are better if we compare structs, not pointers. + require.Equal(t, *e.H, *got) + } + } + } + + testutil.RequireEqual(t, exp, vector) } // - operator. @@ -4425,175 +3557,10 @@ func TestNativeHistogram_SubOperator(t *testing.T) { for idx := 1; idx < len(c.histograms); idx++ { queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx) } - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - }) - idx0++ - } - } -} - -func TestNativeHistogram_MulDivOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - originalHistogram := histogram.Histogram{ - Schema: 0, - Count: 21, - Sum: 33, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{3, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []int64{3, 0, 0}, - } - - cases := []struct { - scalar float64 - histogram histogram.Histogram - expectedMul histogram.FloatHistogram - expectedDiv histogram.FloatHistogram - }{ - { - scalar: 3, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 63, - Sum: 99, - ZeroThreshold: 0.001, - ZeroCount: 9, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{9, 9, 9}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{9, 9, 9}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: 7, - Sum: 11, - ZeroThreshold: 0.001, - ZeroCount: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{1, 1, 1}, - }, - }, - { - scalar: 0, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 0, - Sum: 0, - ZeroThreshold: 0.001, - ZeroCount: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{0, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{0, 0, 0}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: math.Inf(1), - Sum: math.Inf(1), - ZeroThreshold: 0.001, - ZeroCount: math.Inf(1), - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - NegativeSpans: 
[]histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - test, err := NewTest(t, "") - require.NoError(t, err) - t.Cleanup(test.Close) - - seriesName := "sparse_histogram_series" - floatSeriesName := "float_series" - - engine := test.QueryEngine() - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := test.Storage().Appender(context.TODO()) - h := c.histogram - lbls := labels.FromStrings("__name__", seriesName) - // Since we mutate h later, we need to create a copy here. - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - _, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar) - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, exp Vector) { - qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(test.Context()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Equal(t, exp, vector) - } - - // histogram * scalar. - queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // scalar * histogram. - queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // histogram * float. - queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // float * histogram. - queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // histogram / scalar. - queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) - - // histogram / float. 
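The histogram-times-scalar and histogram-divided-by-scalar cases deleted in this block correspond to the model-level Mul and Div operations on FloatHistogram, which scale the sum, zero count, and all bucket counts by the factor. A small sketch mirroring the deleted table's values (the assertions in the comments follow the expectedMul and expectedDiv cases above):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	fh := &histogram.FloatHistogram{
		Schema:          0,
		Count:           21,
		Sum:             33,
		ZeroThreshold:   0.001,
		ZeroCount:       3,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{3, 3, 3},
		NegativeSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		NegativeBuckets: []float64{3, 3, 3},
	}

	// Mul and Div mutate the receiver in place and return it for chaining.
	fmt.Println(fh.Copy().Mul(3).Count) // 63, matching expectedMul above
	fmt.Println(fh.Copy().Div(3).Count) // 7, matching expectedDiv above
}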
- queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) }) - idx0++ } + idx0++ } } @@ -4614,43 +3581,43 @@ metric 0 1 2 }{ { name: "default lookback delta", - ts: lastDatapointTs.Add(defaultLookbackDelta), + ts: lastDatapointTs.Add(defaultLookbackDelta - time.Millisecond), expectSamples: true, }, { name: "outside default lookback delta", - ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond), + ts: lastDatapointTs.Add(defaultLookbackDelta), expectSamples: false, }, { name: "custom engine lookback delta", - ts: lastDatapointTs.Add(10 * time.Minute), + ts: lastDatapointTs.Add(10*time.Minute - time.Millisecond), engineLookback: 10 * time.Minute, expectSamples: true, }, { name: "outside custom engine lookback delta", - ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond), + ts: lastDatapointTs.Add(10 * time.Minute), engineLookback: 10 * time.Minute, expectSamples: false, }, { name: "custom query lookback delta", - ts: lastDatapointTs.Add(20 * time.Minute), + ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond), engineLookback: 10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: true, }, { name: "outside custom query lookback delta", - ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond), + ts: lastDatapointTs.Add(20 * time.Minute), engineLookback: 10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: false, }, { name: "negative custom query lookback delta", - ts: lastDatapointTs.Add(20 * time.Minute), + ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond), engineLookback: -10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: true, @@ -4660,24 +3627,17 @@ metric 0 1 2 for _, c := range cases { c := c t.Run(c.name, func(t *testing.T) { - test, err := NewTest(t, load) - require.NoError(t, err) - defer test.Close() - - err = test.Run() - require.NoError(t, err) + engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery) + storage := promqltest.LoadedStorage(t, load) + t.Cleanup(func() { storage.Close() }) - eng := test.QueryEngine() - if c.engineLookback != 0 { - eng.lookbackDelta = c.engineLookback - } - opts := NewPrometheusQueryOpts(false, c.queryLookback) - qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts) + opts := promql.NewPrometheusQueryOpts(false, c.queryLookback) + qry, err := engine.NewInstantQuery(context.Background(), storage, opts, query, c.ts) require.NoError(t, err) - res := qry.Exec(test.Context()) + res := qry.Exec(context.Background()) require.NoError(t, res.Err) - vec, ok := res.Value.(Vector) + vec, ok := res.Value.(promql.Vector) require.True(t, ok) if c.expectSamples { require.NotEmpty(t, vec) @@ -4687,3 +3647,252 @@ metric 0 1 2 }) } } + +func makeInt64Pointer(val int64) *int64 { + valp := new(int64) + *valp = val + return valp +} + +func TestHistogramCopyFromIteratorRegression(t *testing.T) { + // Loading the following histograms creates two chunks because there's a + // counter reset. Not only the counter is lower in the last histogram + // but also there's missing buckets. + // This in turns means that chunk iterators will have different spans. 
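The comment above is the heart of the regression: the third histogram has a lower count and a missing bucket, which the storage layer treats as a counter reset, so it cuts a second chunk and the two chunk iterators expose different spans. A sketch of the reset check itself, built from the second and third histograms in the load string below (this uses FloatHistogram.DetectReset; the spans are inferred from the verify block's expected output):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	prev := &histogram.FloatHistogram{
		Schema:          0,
		Count:           6,
		Sum:             6,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{3, 3},
	}
	cur := &histogram.FloatHistogram{
		Schema:          0,
		Count:           1,
		Sum:             1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}

	// A lower total count (and a vanished bucket) counts as a reset.
	fmt.Println(cur.DetectReset(prev)) // true
}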
+ load := `load 1m +histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}} +` + storage := promqltest.LoadedStorage(t, load) + t.Cleanup(func() { storage.Close() }) + engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) + + verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) { + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + m, ok := res.Value.(promql.Matrix) + require.True(t, ok) + + require.Len(t, m, 1) + series := m[0] + + require.Empty(t, series.Floats) + require.Len(t, series.Histograms, len(expected)) + for i, e := range expected { + series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care. + require.Equal(t, &e, series.Histograms[i].H) + } + } + + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[90s])", time.Unix(0, 0), time.Unix(0, 0).Add(60*time.Second), time.Minute) + require.NoError(t, err) + verify(t, qry, []histogram.FloatHistogram{ + { + Count: 3, + Sum: 3, // Increase from 4 to 6 is 2. Interpolation adds 1. + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram. + PositiveBuckets: []float64{1.5, 1.5}, // Increase from 2 to 3 is 1 in both buckets. Interpolation adds 0.5. + }, + }) + + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[61s]", time.Unix(0, 0).Add(2*time.Minute)) + require.NoError(t, err) + verify(t, qry, []histogram.FloatHistogram{ + { + Count: 6, + Sum: 6, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []float64{3, 3}, + }, + { + Count: 1, + Sum: 1, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []float64{1}, + }, + }) +} + +func TestRateAnnotations(t *testing.T) { + testCases := map[string]struct { + data string + expr string + expectedWarningAnnotations []string + expectedInfoAnnotations []string + }{ + "info annotation when two samples are selected": { + data: ` + series 1 2 + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`, + }, + }, + "no info annotations when no samples": { + data: ` + series + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting one sample": { + data: ` + series 1 2 + `, + expr: "rate(series[10s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when no samples due to mixed data types": { + data: ` + series{label="a"} 1 {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{ + `PromQL warning: encountered a mix of histograms and floats for metric name "series" (1:6)`, + }, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting two native histograms": { + data: ` + series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data)) + 
t.Cleanup(func() { _ = store.Close() }) + + engine := newTestEngine(t) + query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute)) + require.NoError(t, err) + t.Cleanup(query.Close) + + res := query.Exec(context.Background()) + require.NoError(t, res.Err) + + warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0) + testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings) + testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos) + }) + } +} + +func TestHistogramRateWithFloatStaleness(t *testing.T) { + // Make a chunk with two normal histograms of the same value. + h1 := histogram.Histogram{ + Schema: 2, + Count: 10, + Sum: 100, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{100}, + } + + c1 := chunkenc.NewHistogramChunk() + app, err := c1.Appender() + require.NoError(t, err) + var ( + newc chunkenc.Chunk + recoded bool + ) + + newc, recoded, app, err = app.AppendHistogram(nil, 0, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 10, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + // Make a chunk with a single float stale marker. + c2 := chunkenc.NewXORChunk() + app, err = c2.Appender() + require.NoError(t, err) + + app.Append(20, math.Float64frombits(value.StaleNaN)) + + // Make a chunk with two normal histograms that have zero value. + h2 := histogram.Histogram{ + Schema: 2, + } + + c3 := chunkenc.NewHistogramChunk() + app, err = c3.Appender() + require.NoError(t, err) + + newc, recoded, app, err = app.AppendHistogram(nil, 30, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 40, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + querier := storage.MockQuerier{ + SelectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { + return &singleSeriesSet{ + series: mockSeries{chunks: []chunkenc.Chunk{c1, c2, c3}, labelSet: []string{"__name__", "foo"}}, + } + }, + } + + queriable := storage.MockQueryable{MockQuerier: &querier} + + engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) + + q, err := engine.NewInstantQuery(context.Background(), &queriable, nil, "rate(foo[40s])", timestamp.Time(45)) + require.NoError(t, err) + defer q.Close() + + res := q.Exec(context.Background()) + require.NoError(t, res.Err) + + vec, err := res.Vector() + require.NoError(t, err) + + // Single sample result. + require.Len(t, vec, 1) + // The result is a histogram. + require.NotNil(t, vec[0].H) + // The result should be zero as the histogram has not increased, so the rate is zero. + require.Equal(t, 0.0, vec[0].H.Count) + require.Equal(t, 0.0, vec[0].H.Sum) +} + +type singleSeriesSet struct { + series storage.Series + consumed bool +} + +func (s *singleSeriesSet) Next() bool { c := s.consumed; s.consumed = true; return !c } +func (s singleSeriesSet) At() storage.Series { return s.series } +func (s singleSeriesSet) Err() error { return nil } +func (s singleSeriesSet) Warnings() annotations.Annotations { return nil } + +type mockSeries struct { + chunks []chunkenc.Chunk + labelSet []string +} + +func (s mockSeries) Labels() labels.Labels { + return labels.FromStrings(s.labelSet...) 
+} + +func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + iterables := []chunkenc.Iterator{} + for _, c := range s.chunks { + iterables = append(iterables, c.Iterator(nil)) + } + return storage.ChainSampleIteratorFromIterators(it, iterables) +} diff --git a/promql/functions.go b/promql/functions.go index 96bffab96d..4333cb5ce0 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -11,23 +11,28 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unsued function arguments in this file by design. package promql import ( + "context" + "errors" "fmt" "math" + "slices" "sort" "strconv" "strings" "time" + "github.com/facette/natsort" "github.com/grafana/regexp" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/util/annotations" ) // FunctionCall is the type of a PromQL function implementation @@ -51,20 +56,20 @@ import ( // metrics, the timestamp are not needed. // // Scalar results should be returned as the value of a sample in a Vector. -type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector +type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, - }} + }}, nil } // extrapolatedRate is a utility function for rate/increase/delta. // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( @@ -75,14 +80,15 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod resultHistogram *histogram.FloatHistogram firstT, lastT int64 numSamplesMinusOne int + annos annotations.Annotations ) // We need either at least two Histograms and no Floats, or at least two // Floats and no Histograms to calculate a rate. Otherwise, drop this // Vector element. + metricName := samples.Metric.Get(labels.MetricName) if len(samples.Histograms) > 0 && len(samples.Floats) > 0 { - // Mix of histograms and floats. TODO(beorn7): Communicate this failure reason. 
- return enh.Out
+ return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
 }
 
 switch {
@@ -90,11 +96,12 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 numSamplesMinusOne = len(samples.Histograms) - 1
 firstT = samples.Histograms[0].T
 lastT = samples.Histograms[numSamplesMinusOne].T
- resultHistogram = histogramRate(samples.Histograms, isCounter)
+ var newAnnos annotations.Annotations
+ resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
+ annos.Merge(newAnnos)
 if resultHistogram == nil {
 // The histograms are not compatible with each other.
- // TODO(beorn7): Communicate this failure reason.
- return enh.Out
+ return enh.Out, annos
 }
 case len(samples.Floats) > 1:
 numSamplesMinusOne = len(samples.Floats) - 1
@@ -113,8 +120,8 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 prevValue = currPoint.F
 }
 default:
- // Not enough samples. TODO(beorn7): Communicate this failure reason.
- return enh.Out
+ // TODO: add RangeTooShortWarning
+ return enh.Out, annos
 }
 
 // Duration between first/last samples and boundary of range.
@@ -124,7 +131,24 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 sampledInterval := float64(lastT-firstT) / 1000
 averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne)
 
- // TODO(beorn7): Do this for histograms, too.
+ // If samples are close enough to the (lower or upper) boundary of the
+ // range, we extrapolate the rate all the way to the boundary in
+ // question. "Close enough" is defined as "up to 10% more than the
+ // average duration between samples within the range", see
+ // extrapolationThreshold below. Essentially, we are assuming a more or
+ // less regular spacing between samples, and if we don't see a sample
+ // where we would expect one, we assume the series does not cover the
+ // whole range, but starts and/or ends within the range. We still
+ // extrapolate the rate in this case, but only by half of the average
+ // duration between samples rather than all the way to the boundary,
+ // since that is our guess for where the series actually starts or ends.
+
+ extrapolationThreshold := averageDurationBetweenSamples * 1.1
+ extrapolateToInterval := sampledInterval
+
+ if durationToStart >= extrapolationThreshold {
+ durationToStart = averageDurationBetweenSamples / 2
+ }
 if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 {
 // Counters cannot be negative. If we have any slope at all
 // (i.e. resultFloat went up), we can extrapolate the zero point
@@ -132,29 +156,19 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 // than the durationToStart, we take the zero point as the start
 // of the series, thereby avoiding extrapolation to negative
 // counter values.
+ // TODO(beorn7): Do this for histograms, too.
 durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat)
 if durationToZero < durationToStart {
 durationToStart = durationToZero
 }
 }
+ extrapolateToInterval += durationToStart
+
- // If the first/last samples are close to the boundaries of the range,
- // extrapolate the result. This is as we expect that another sample
- // will exist given the spacing between samples we've seen thus far,
- // with an allowance for noise.
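+ // Worked example (illustrative): for a 60s range with samples at
+ // t=5s,15s,...,55s, sampledInterval is 50s and
+ // averageDurationBetweenSamples is 10s, so extrapolationThreshold is
+ // 11s. durationToStart and durationToEnd are both 5s, below the
+ // threshold, so the rate is extrapolated to the full 60s window and
+ // the resulting factor is 60/50 = 1.2.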
- extrapolationThreshold := averageDurationBetweenSamples * 1.1 - extrapolateToInterval := sampledInterval - - if durationToStart < extrapolationThreshold { - extrapolateToInterval += durationToStart - } else { - extrapolateToInterval += averageDurationBetweenSamples / 2 - } - if durationToEnd < extrapolationThreshold { - extrapolateToInterval += durationToEnd - } else { - extrapolateToInterval += averageDurationBetweenSamples / 2 + if durationToEnd >= extrapolationThreshold { + durationToEnd = averageDurationBetweenSamples / 2 } + extrapolateToInterval += durationToEnd + factor := extrapolateToInterval / sampledInterval if isRate { factor /= ms.Range.Seconds() @@ -165,23 +179,38 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod resultHistogram.Mul(factor) } - return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) + return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}), annos } // histogramRate is a helper function for extrapolatedRate. It requires // points[0] to be a histogram. It returns nil if any other Point in points is -// not a histogram. -func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { +// not a histogram, and a warning wrapped in an annotation in that case. +// Otherwise, it returns the calculated histogram and an empty annotation. +func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { prev := points[0].H + usingCustomBuckets := prev.UsesCustomBuckets() last := points[len(points)-1].H if last == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } + minSchema := prev.Schema if last.Schema < minSchema { minSchema = last.Schema } + if last.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } + + var annos annotations.Annotations + + // We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point, + // so check the first and last point now. + if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + } + // First iteration to find out two things: // - What's the smallest relevant schema? // - Are all data points histograms? @@ -190,66 +219,88 @@ func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } - // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint. 
if !isCounter { continue } + if curr.CounterResetHint == histogram.GaugeType { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + } if curr.Schema < minSchema { minSchema = curr.Schema } + if curr.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } } h := last.CopyToSchema(minSchema) - h.Sub(prev) + _, err := h.Sub(prev) + if err != nil { + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } + } if isCounter { // Second iteration to deal with counter resets. for _, currPoint := range points[1:] { curr := currPoint.H if curr.DetectReset(prev) { - h.Add(prev) + _, err := h.Add(prev) + if err != nil { + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } + } } prev = curr } + } else if points[0].H.CounterResetHint != histogram.GaugeType || points[len(points)-1].H.CounterResetHint != histogram.GaugeType { + annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, pos)) } h.CounterResetHint = histogram.GaugeType - return h.Compact(0) + return h.Compact(0), annos } -// === delta(Matrix parser.ValueTypeMatrix) Vector === -func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, false, false) } -// === rate(node parser.ValueTypeMatrix) Vector === -func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === rate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, true) } -// === increase(node parser.ValueTypeMatrix) Vector === -func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === increase(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, false) } -// === irate(node parser.ValueTypeMatrix) Vector === -func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, true) } -// === idelta(node model.ValMatrix) Vector === -func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === idelta(node model.ValMatrix) (Vector, Annotations) === +func funcIdelta(vals []parser.Value, args 
parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, false) } -func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { +func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // No sense in trying to compute a rate without at least two points. Drop // this Vector element. + // TODO: add RangeTooShortWarning if len(samples.Floats) < 2 { - return out + return out, nil } lastSample := samples.Floats[len(samples.Floats)-1] @@ -266,7 +317,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { sampledInterval := lastSample.T - previousSample.T if sampledInterval == 0 { // Avoid dividing by 0. - return out + return out, nil } if isRate { @@ -274,7 +325,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { resultValue /= float64(sampledInterval) / 1000 } - return append(out, Sample{F: resultValue}) + return append(out, Sample{F: resultValue}), nil } // Calculate the trend value at the given index i in raw data d. @@ -299,7 +350,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. @@ -320,7 +371,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode // Can't do the smoothing operation with less than two points. if l < 2 { - return enh.Out + return enh.Out, nil } var s0, s1, b float64 @@ -331,7 +382,6 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode // Run the smoothing operation. var x, y float64 for i := 1; i < l; i++ { - // Scale the raw value against the smoothing factor. x = sf * samples.Floats[i].F @@ -342,72 +392,154 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode s0, s1 = s1, x+y } - return append(enh.Out, Sample{F: s1}) + return append(enh.Out, Sample{F: s1}), nil } -// === sort(node parser.ValueTypeVector) Vector === -func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sort(node parser.ValueTypeVector) (Vector, Annotations) === +func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. 
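+ // Illustrative example: for the values [3, NaN, 1], the descending pass
+ // with NaN first yields [NaN, 3, 1]; reversing it gives [1, 3, NaN],
+ // i.e. an ascending sort with NaN at the bottom.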
byValueSorter := vectorByReverseValueHeap(vals[0].(Vector)) sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) + return Vector(byValueSorter), nil } -// === sortDesc(node parser.ValueTypeVector) Vector === -func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === +func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. byValueSorter := vectorByValueHeap(vals[0].(Vector)) sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) + return Vector(byValueSorter), nil } -// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === +func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + // First, sort by the full label set. This ensures a consistent ordering in case sorting by the + // labels provided as arguments is not conclusive. + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + return labels.Compare(a.Metric, b.Metric) + }) + + labels := stringSliceFromArgs(args[1:]) + // Next, sort by the labels provided as arguments. + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + // Iterate over each given label. + for _, label := range labels { + lv1 := a.Metric.Get(label) + lv2 := b.Metric.Get(label) + + // If we encounter multiple samples with the same label values, the sorting which was + // performed in the first step will act as a "tie breaker". + if lv1 == lv2 { + continue + } + + if natsort.Compare(lv1, lv2) { + return -1 + } + + return +1 + } + + return 0 + }) + + return vals[0].(Vector), nil +} + +// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === +func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + // First, sort by the full label set. This ensures a consistent ordering in case sorting by the + // labels provided as arguments is not conclusive. + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + return labels.Compare(b.Metric, a.Metric) + }) + + labels := stringSliceFromArgs(args[1:]) + // Next, sort by the labels provided as arguments. + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + // Iterate over each given label. + for _, label := range labels { + lv1 := a.Metric.Get(label) + lv2 := b.Metric.Get(label) + + // If we encounter multiple samples with the same label values, the sorting which was + // performed in the first step will act as a "tie breaker". 
+ if lv1 == lv2 { + continue + } + + if natsort.Compare(lv1, lv2) { + return +1 + } + + return -1 + } + + return 0 + }) + + return vals[0].(Vector), nil +} + +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === +func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F - max := vals[2].(Vector)[0].F - if max < min { - return enh.Out + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F + if maxVal < minVal { + return enh.Out, nil } for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - F: math.Max(min, math.Min(max, el.F)), + Metric: el.Metric, + F: math.Max(minVal, math.Min(maxVal, el.F)), + DropName: true, }) } - return enh.Out + return enh.Out, nil } -// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === -func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === +func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].F + maxVal := vals[1].(Vector)[0].F for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - F: math.Min(max, el.F), + Metric: el.Metric, + F: math.Min(maxVal, el.F), + DropName: true, }) } - return enh.Out + return enh.Out, nil } -// === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector === -func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === +func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F + minVal := vals[1].(Vector)[0].F for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - F: math.Max(min, el.F), + Metric: el.Metric, + F: math.Max(minVal, el.F), + DropName: true, }) } - return enh.Out + return enh.Out, nil } -// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) Vector === -func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === +func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) // round returns a number rounded to toNearest. // Ties are solved by rounding up. 
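The natural-sort comparison used by sort_by_label above orders numeric runs inside label values by magnitude rather than by byte order. A minimal standalone sketch of that ordering, assuming only the github.com/facette/natsort package already imported in this file:

	package main

	import (
		"fmt"
		"sort"

		"github.com/facette/natsort"
	)

	func main() {
		pods := []string{"pod-10", "pod-2", "pod-1"}
		// natsort.Compare reports whether its first argument sorts before
		// the second under natural ordering, so "pod-2" precedes "pod-10".
		sort.Slice(pods, func(i, j int) bool { return natsort.Compare(pods[i], pods[j]) })
		fmt.Println(pods) // [pod-1 pod-2 pod-10]
	}
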
@@ -421,20 +553,21 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper for _, el := range vec { f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - F: f, + Metric: el.Metric, + F: f, + DropName: true, }) } - return enh.Out + return enh.Out, nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { v := vals[0].(Vector) if len(v) != 1 { - return append(enh.Out, Sample{F: math.NaN()}) + return append(enh.Out, Sample{F: math.NaN()}), nil } - return append(enh.Out, Sample{F: v[0].F}) + return append(enh.Out, Sample{F: v[0].F}), nil } func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { @@ -443,44 +576,73 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) return append(enh.Out, Sample{F: aggrFn(el)}) } -func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { +func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) { el := vals[0].(Matrix)[0] + res, err := aggrFn(el) - return append(enh.Out, Sample{H: aggrFn(el)}) + return append(enh.Out, Sample{H: res}), err } -// === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { - // TODO(zenador): Add warning for mixed floats and histograms. - return enh.Out +// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } - if len(vals[0].(Matrix)[0].Floats) == 0 { + if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. - return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { count := 1 mean := s.Histograms[0].H.Copy() for _, h := range s.Histograms[1:] { count++ left := h.H.Copy().Div(float64(count)) right := mean.Copy().Div(float64(count)) - // The histogram being added/subtracted must have - // an equal or larger schema. 
- if h.H.Schema >= mean.Schema {
- toAdd := right.Mul(-1).Add(left)
- mean.Add(toAdd)
- } else {
- toAdd := left.Sub(right)
- mean = toAdd.Add(mean)
+ toAdd, err := left.Sub(right)
+ if err != nil {
+ return mean, err
+ }
+ _, err = mean.Add(toAdd)
+ if err != nil {
+ return mean, err
+ }
 }
- return mean
+ return mean, nil
 })
+ if err != nil {
+ metricName := firstSeries.Metric.Get(labels.MetricName)
+ if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+ return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+ } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+ return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
+ }
+ }
+ return vec, nil
 }

 return aggrOverTime(vals, enh, func(s Series) float64 {
- var mean, count, c float64
+ var (
+ sum, mean, count, kahanC float64
+ incrementalMean bool
+ )
 for _, f := range s.Floats {
 count++
+ if !incrementalMean {
+ newSum, newC := kahanSumInc(f.F, sum, kahanC)
+ // Perform regular mean calculation as long as
+ // the sum doesn't overflow and (in any case)
+ // for the first iteration (even if we start
+ // with ±Inf) to not run into division-by-zero
+ // problems below.
+ if count == 1 || !math.IsInf(newSum, 0) {
+ sum, kahanC = newSum, newC
+ continue
+ }
+ // Handle overflow by reverting to incremental calculation of the mean value.
+ incrementalMean = true
+ mean = sum / (count - 1)
+ kahanC /= count - 1
+ }
 if math.IsInf(mean, 0) {
 if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
 // The `mean` and `f.F` values are `Inf` of the same sign. They
@@ -498,25 +660,25 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 continue
 }
 }
- mean, c = kahanSumInc(f.F/count-mean/count, mean, c)
+ correctedMean := mean + kahanC
+ mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC)
 }
-
- if math.IsInf(mean, 0) {
- return mean
+ if incrementalMean {
+ return mean + kahanC
 }
- return mean + c
- })
+ return (sum + kahanC) / count
+ }), nil
 }

-// === count_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 return aggrOverTime(vals, enh, func(s Series) float64 {
 return float64(len(s.Floats) + len(s.Histograms))
- })
+ }), nil
 }

-// === last_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 el := vals[0].(Matrix)[0]

 var f FPoint
@@ -533,75 +695,101 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod
 return append(enh.Out, Sample{
 Metric: el.Metric,
 F: f.F,
- })
+ }), nil
 }

 return append(enh.Out, Sample{
 Metric: el.Metric,
- H: h.H,
- })
+ H: h.H.Copy(),
+ }), nil
 }

-// === max_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func 
funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vals[0].(Matrix)[0].Floats) == 0 { + return enh.Out, nil + } + return aggrOverTime(vals, enh, func(s Series) float64 { + values := make(vectorByValueHeap, 0, len(s.Floats)) + for _, f := range s.Floats { + values = append(values, Sample{F: f.F}) + } + median := quantile(0.5, values) + values = make(vectorByValueHeap, 0, len(s.Floats)) + for _, f := range s.Floats { + values = append(values, Sample{F: math.Abs(f.F - median)}) + } + return quantile(0.5, values) + }), nil +} + +// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. max_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - max := s.Floats[0].F + maxVal := s.Floats[0].F for _, f := range s.Floats { - if f.F > max || math.IsNaN(max) { - max = f.F + if f.F > maxVal || math.IsNaN(maxVal) { + maxVal = f.F } } - return max - }) + return maxVal + }), nil } -// === min_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. min_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - min := s.Floats[0].F + minVal := s.Floats[0].F for _, f := range s.Floats { - if f.F < min || math.IsNaN(min) { - min = f.F + if f.F < minVal || math.IsNaN(minVal) { + minVal = f.F } } - return min - }) + return minVal + }), nil } -// === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { - // TODO(zenador): Add warning for mixed floats and histograms. - return enh.Out +// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } - if len(vals[0].(Matrix)[0].Floats) == 0 { + if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. 
- return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { sum := s.Histograms[0].H.Copy() for _, h := range s.Histograms[1:] { - // The histogram being added must have - // an equal or larger schema. - if h.H.Schema >= sum.Schema { - sum.Add(h.H) - } else { - sum = h.H.Copy().Add(sum) + _, err := sum.Add(h.H) + if err != nil { + return sum, err } } - return sum + return sum, nil }) + if err != nil { + metricName := firstSeries.Metric.Get(labels.MetricName) + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange())) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange())) + } + } + return vec, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 @@ -612,11 +800,11 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return sum } return sum + c - }) + }), nil } -// === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] if len(el.Floats) == 0 { @@ -624,24 +812,29 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // histograms. quantile_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil + } + + var annos annotations.Annotations + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { values = append(values, Sample{F: f.F}) } - return append(enh.Out, Sample{F: quantile(q, values)}) + return append(enh.Out, Sample{F: quantile(q, values)}), annos } -// === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. stddev_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. 
- return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 @@ -654,17 +847,17 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) - }) + }), nil } -// === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. stdvar_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 @@ -677,170 +870,174 @@ func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count - }) + }), nil } -// === absent(Vector parser.ValueTypeVector) Vector === -func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Vector)) > 0 { - return enh.Out + return enh.Out, nil } return append(enh.Out, Sample{ Metric: createLabelsForAbsentFunction(args[0]), F: 1, - }) + }), nil } -// === absent_over_time(Vector parser.ValueTypeMatrix) Vector === +// === absent_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === // As this function has a matrix as argument, it does not get all the Series. // This function will return 1 if the matrix has at least one element. // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. -func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return append(enh.Out, Sample{F: 1}) +func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return append(enh.Out, Sample{F: 1}), nil } -// === present_over_time(Vector parser.ValueTypeMatrix) Vector === -func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === +func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return aggrOverTime(vals, enh, func(s Series) float64 { return 1 - }) + }), nil } func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { if el.H == nil { // Process only float samples. 
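+ // When delayed name removal is disabled, the metric name is dropped
+ // eagerly right here; otherwise DropName below marks the sample so the
+ // engine can strip the name at the end of the evaluation (behaviour
+ // assumed from the enableDelayedNameRemoval flag used throughout this
+ // file).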
+ if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - F: f(el.F), + Metric: el.Metric, + F: f(el.F), + DropName: true, }) } } return enh.Out } -// === abs(Vector parser.ValueTypeVector) Vector === -func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Abs) +// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Abs), nil } -// === ceil(Vector parser.ValueTypeVector) Vector === -func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Ceil) +// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Ceil), nil } -// === floor(Vector parser.ValueTypeVector) Vector === -func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Floor) +// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Floor), nil } -// === exp(Vector parser.ValueTypeVector) Vector === -func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Exp) +// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Exp), nil } -// === sqrt(Vector VectorNode) Vector === -func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sqrt) +// === sqrt(Vector VectorNode) (Vector, Annotations) === +func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sqrt), nil } -// === ln(Vector parser.ValueTypeVector) Vector === -func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log) +// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log), nil } -// === log2(Vector parser.ValueTypeVector) Vector === -func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log2) +// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log2), nil } -// === log10(Vector parser.ValueTypeVector) Vector === -func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log10) +// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { + return simpleFunc(vals, enh, math.Log10), nil } -// === sin(Vector parser.ValueTypeVector) Vector === -func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sin) +// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sin), nil } -// === cos(Vector parser.ValueTypeVector) Vector === -func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Cos) +// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Cos), nil } -// === tan(Vector parser.ValueTypeVector) Vector === -func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Tan) +// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Tan), nil } -// == asin(Vector parser.ValueTypeVector) Vector === -func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Asin) +// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Asin), nil } -// == acos(Vector parser.ValueTypeVector) Vector === -func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Acos) +// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Acos), nil } -// == atan(Vector parser.ValueTypeVector) Vector === -func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Atan) +// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Atan), nil } -// == sinh(Vector parser.ValueTypeVector) Vector === -func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sinh) +// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sinh), nil } -// == cosh(Vector parser.ValueTypeVector) Vector === -func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Cosh) +// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Cosh), nil } -// == tanh(Vector parser.ValueTypeVector) Vector === -func funcTanh(vals []parser.Value, args 
parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Tanh) +// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Tanh), nil } -// == asinh(Vector parser.ValueTypeVector) Vector === -func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Asinh) +// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Asinh), nil } -// == acosh(Vector parser.ValueTypeVector) Vector === -func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Acosh) +// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Acosh), nil } -// == atanh(Vector parser.ValueTypeVector) Vector === -func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Atanh) +// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Atanh), nil } -// === rad(Vector parser.ValueTypeVector) Vector === -func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * math.Pi / 180 - }) + }), nil } -// === deg(Vector parser.ValueTypeVector) Vector === -func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * 180 / math.Pi - }) + }), nil } // === pi() Scalar === -func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{F: math.Pi}} +func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return Vector{Sample{F: math.Pi}}, nil } -// === sgn(Vector parser.ValueTypeVector) Vector === -func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { switch { case v < 0: @@ -850,36 +1047,35 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) default: return v } - }) + }), nil } -// === timestamp(Vector parser.ValueTypeVector) Vector === -func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) 
=== +func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - F: float64(el.T) / 1000, + Metric: el.Metric, + F: float64(el.T) / 1000, + DropName: true, }) } - return enh.Out -} - -func kahanSum(samples []float64) float64 { - var sum, c float64 - - for _, v := range samples { - sum, c = kahanSumInc(v, sum, c) - } - return sum + c + return enh.Out, nil } func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { t := sum + inc + switch { + case math.IsInf(t, 0): + c = 0 + // Using Neumaier improvement, swap if next term larger than sum. - if math.Abs(sum) >= math.Abs(inc) { + case math.Abs(sum) >= math.Abs(inc): c += (sum - t) + inc - } else { + default: c += (inc - t) + sum } return t, c @@ -931,39 +1127,39 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f return slope, intercept } -// === deriv(node parser.ValueTypeMatrix) Vector === -func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // No sense in trying to compute a derivative without at least two points. // Drop this Vector element. if len(samples.Floats) < 2 { - return enh.Out + return enh.Out, nil } // We pass in an arbitrary timestamp that is near the values in use // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) - return append(enh.Out, Sample{F: slope}) + return append(enh.Out, Sample{F: slope}), nil } -// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector === -func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) === +func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] duration := vals[1].(Vector)[0].F // No sense in trying to predict anything without at least two points. // Drop this Vector element. 
if len(samples.Floats) < 2 { - return enh.Out + return enh.Out, nil } slope, intercept := linearRegression(samples.Floats, enh.Ts) - return append(enh.Out, Sample{F: slope*duration + intercept}) + return append(enh.Out, Sample{F: slope*duration + intercept}), nil } -// === histogram_count(Vector parser.ValueTypeVector) Vector === -func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -971,16 +1167,20 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - F: sample.H.Count, + Metric: sample.Metric, + F: sample.H.Count, + DropName: true, }) } - return enh.Out + return enh.Out, nil } -// === histogram_sum(Vector parser.ValueTypeVector) Vector === -func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -988,16 +1188,127 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - F: sample.H.Sum, + Metric: sample.Metric, + F: sample.H.Sum, + DropName: true, }) } - return enh.Out + return enh.Out, nil +} + +// === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } + enh.Out = append(enh.Out, Sample{ + Metric: sample.Metric, + F: sample.H.Sum / sample.H.Count, + DropName: true, + }) + } + return enh.Out, nil +} + +// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. 
+ if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue + } + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + if bucket.Upper < 0 { + val = -val + } + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } + enh.Out = append(enh.Out, Sample{ + Metric: sample.Metric, + F: math.Sqrt(variance), + DropName: true, + }) + } + return enh.Out, nil +} + +// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + if bucket.Count == 0 { + continue + } + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + if bucket.Upper < 0 { + val = -val + } + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } + enh.Out = append(enh.Out, Sample{ + Metric: sample.Metric, + F: variance, + DropName: true, + }) + } + return enh.Out, nil } -// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === -func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { lower := vals[0].(Vector)[0].F upper := vals[1].(Vector)[0].F inVec := vals[2].(Vector) @@ -1007,18 +1318,27 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - F: histogramFraction(lower, upper, sample.H), + Metric: sample.Metric, + F: histogramFraction(lower, upper, sample.H), + DropName: true, }) } - return enh.Out + return enh.Out, nil } -// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === -func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { q := vals[0].(Vector)[0].F inVec := vals[1].(Vector) + var annos annotations.Annotations + + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, 
args[0].PositionRange())) + } if enh.signatureToMetricWithBuckets == nil { enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{} @@ -1031,7 +1351,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev var histogramSamples []Sample for _, sample := range inVec { - // We are only looking for conventional buckets here. Remember + // We are only looking for classic buckets here. Remember // the histograms for later treatment. if sample.H != nil { histogramSamples = append(histogramSamples, sample) @@ -1042,8 +1362,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev sample.Metric.Get(model.BucketLabel), 64, ) if err != nil { - // Oops, no bucket label or malformed label value. Skip. - // TODO(beorn7): Issue a warning somehow. + annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange())) continue } enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) @@ -1057,43 +1376,50 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } mb.buckets = append(mb.buckets, bucket{upperBound, sample.F}) - } // Now deal with the histograms. for _, sample := range histogramSamples { // We have to reconstruct the exact same signature as above for - // a conventional histogram, just ignoring any le label. + // a classic histogram, just ignoring any le label. enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 { - // At this data point, we have conventional histogram + // At this data point, we have classic histogram // buckets and a native histogram with the same name and // labels. Do not evaluate anything. - // TODO(beorn7): Issue a warning somehow. 
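// The old TODOs about surfacing problems are resolved by the annotations
// plumbing: an invalid quantile (q < 0, q > 1, or NaN) yields a warning
// while the quantile is still computed; an unparsable `le` label value
// yields a bad-bucket warning and the sample is skipped; and, directly
// below, a series carrying both classic buckets and a native histogram at
// the same data point is dropped with a mixed-histograms warning instead
// of being evaluated ambiguously.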
+ annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange())) delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(sample.Metric), - F: histogramQuantile(q, sample.H), + Metric: sample.Metric, + F: histogramQuantile(q, sample.H), + DropName: true, }) } for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { + res, forcedMonotonicity, _ := bucketQuantile(q, mb.buckets) enh.Out = append(enh.Out, Sample{ Metric: mb.metric, - F: bucketQuantile(q, mb.buckets), + F: res, }) + if forcedMonotonicity { + annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange())) + } } } - return enh.Out + return enh.Out, annos } -// === resets(Matrix parser.ValueTypeMatrix) Vector === -func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms resets := 0 @@ -1120,17 +1446,17 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } } - return append(enh.Out, Sample{F: float64(resets)}) + return append(enh.Out, Sample{F: float64(resets)}), nil } -// === changes(Matrix parser.ValueTypeMatrix) Vector === -func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats changes := 0 if len(floats) == 0 { // TODO(beorn7): Only histogram values, still need to add support. - return enh.Out + return enh.Out, nil } prev := floats[0].F @@ -1142,86 +1468,68 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp prev = current } - return append(enh.Out, Sample{F: float64(changes)}) + return append(enh.Out, Sample{F: float64(changes)}), nil } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// label_replace function operates only on series; does not look at timestamps or values. 
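// Note that the anchoring changed from "^(?:...)$" to "^(?s:...)$", so `.`
// in the user-supplied pattern now also matches newlines inside label
// values. Usage is unchanged; for example (PromQL),
//
//	label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*")
//
// returns the series with an added label foo="a", while series whose
// `service` value does not match the anchored pattern pass through
// unchanged.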
+func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( - vector = vals[0].(Vector) dst = stringFromArg(args[1]) repl = stringFromArg(args[2]) src = stringFromArg(args[3]) regexStr = stringFromArg(args[4]) ) - if enh.regex == nil { - var err error - enh.regex, err = regexp.Compile("^(?:" + regexStr + ")$") - if err != nil { - panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) - } - if !model.LabelNameRE.MatchString(dst) { - panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) - } - enh.Dmn = make(map[uint64]labels.Labels, len(enh.Out)) + regex, err := regexp.Compile("^(?s:" + regexStr + ")$") + if err != nil { + panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) + } + if !model.LabelNameRE.MatchString(dst) { + panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } - for _, el := range vector { - h := el.Metric.Hash() - var outMetric labels.Labels - if l, ok := enh.Dmn[h]; ok { - outMetric = l - } else { - srcVal := el.Metric.Get(src) - indexes := enh.regex.FindStringSubmatchIndex(srcVal) - if indexes == nil { - // If there is no match, no replacement should take place. - outMetric = el.Metric - enh.Dmn[h] = outMetric - } else { - res := enh.regex.ExpandString([]byte{}, repl, srcVal, indexes) + val, ws := ev.eval(ctx, args[0]) + matrix := val.(Matrix) + lb := labels.NewBuilder(labels.EmptyLabels()) - lb := labels.NewBuilder(el.Metric).Del(dst) - if len(res) > 0 { - lb.Set(dst, string(res)) - } - outMetric = lb.Labels() - enh.Dmn[h] = outMetric + for i, el := range matrix { + srcVal := el.Metric.Get(src) + indexes := regex.FindStringSubmatchIndex(srcVal) + if indexes != nil { // Only replace when regexp matches. + res := regex.ExpandString([]byte{}, repl, srcVal, indexes) + lb.Reset(el.Metric) + lb.Set(dst, string(res)) + matrix[i].Metric = lb.Labels() + if dst == model.MetricNameLabel { + matrix[i].DropName = false + } else { + matrix[i].DropName = el.DropName } } - - enh.Out = append(enh.Out, Sample{ - Metric: outMetric, - F: el.F, - H: el.H, - }) } - return enh.Out + if matrix.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + + return matrix, ws } -// === Vector(s Scalar) Vector === -func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === Vector(s Scalar) (Vector, Annotations) === +func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, F: vals[0].(Vector)[0].F, - }) + }), nil } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) Vector === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// label_join function operates only on series; does not look at timestamps or values. 
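// For example (PromQL),
//
//	label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1", "src2", "src3")
//
// adds the label foo="a,b,c" by joining the source label values with ",".
// As in evalLabelReplace above, writing into __name__ clears the DropName
// flag so that an explicitly set metric name survives later name removal.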
+func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( - vector = vals[0].(Vector) dst = stringFromArg(args[1]) sep = stringFromArg(args[2]) srcLabels = make([]string, len(args)-3) ) - - if enh.Dmn == nil { - enh.Dmn = make(map[uint64]labels.Labels, len(enh.Out)) - } - for i := 3; i < len(args); i++ { src := stringFromArg(args[i]) if !model.LabelName(src).IsValid() { @@ -1229,43 +1537,32 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe } srcLabels[i-3] = src } - if !model.LabelName(dst).IsValid() { panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) } + val, ws := ev.eval(ctx, args[0]) + matrix := val.(Matrix) srcVals := make([]string, len(srcLabels)) - for _, el := range vector { - h := el.Metric.Hash() - var outMetric labels.Labels - if l, ok := enh.Dmn[h]; ok { - outMetric = l - } else { - - for i, src := range srcLabels { - srcVals[i] = el.Metric.Get(src) - } + lb := labels.NewBuilder(labels.EmptyLabels()) - lb := labels.NewBuilder(el.Metric) - - strval := strings.Join(srcVals, sep) - if strval == "" { - lb.Del(dst) - } else { - lb.Set(dst, strval) - } - - outMetric = lb.Labels() - enh.Dmn[h] = outMetric + for i, el := range matrix { + for i, src := range srcLabels { + srcVals[i] = el.Metric.Get(src) } + strval := strings.Join(srcVals, sep) + lb.Reset(el.Metric) + lb.Set(dst, strval) + matrix[i].Metric = lb.Labels() - enh.Out = append(enh.Out, Sample{ - Metric: outMetric, - F: el.F, - H: el.H, - }) + if dst == model.MetricNameLabel { + matrix[i].DropName = false + } else { + matrix[i].DropName = el.DropName + } } - return enh.Out + + return matrix, ws } // Common code for date related functions. @@ -1280,142 +1577,152 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo for _, el := range vals[0].(Vector) { t := time.Unix(int64(el.F), 0).UTC() + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - F: f(t), + Metric: el.Metric, + F: f(t), + DropName: true, }) } return enh.Out } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) - }) + }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Day()) - }) + }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Weekday()) - }) + }), nil } // === day_of_year(v Vector) Scalar === -func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) 
(Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.YearDay()) - }) + }), nil } // === hour(v Vector) Scalar === -func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Hour()) - }) + }), nil } // === minute(v Vector) Scalar === -func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Minute()) - }) + }), nil } // === month(v Vector) Scalar === -func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Month()) - }) + }), nil } // === year(v Vector) Scalar === -func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Year()) - }) + }), nil } // FunctionCalls is a list of all functions supported by PromQL, including their types. var FunctionCalls = map[string]FunctionCall{ - "abs": funcAbs, - "absent": funcAbsent, - "absent_over_time": funcAbsentOverTime, - "acos": funcAcos, - "acosh": funcAcosh, - "asin": funcAsin, - "asinh": funcAsinh, - "atan": funcAtan, - "atanh": funcAtanh, - "avg_over_time": funcAvgOverTime, - "ceil": funcCeil, - "changes": funcChanges, - "clamp": funcClamp, - "clamp_max": funcClampMax, - "clamp_min": funcClampMin, - "cos": funcCos, - "cosh": funcCosh, - "count_over_time": funcCountOverTime, - "days_in_month": funcDaysInMonth, - "day_of_month": funcDayOfMonth, - "day_of_week": funcDayOfWeek, - "day_of_year": funcDayOfYear, - "deg": funcDeg, - "delta": funcDelta, - "deriv": funcDeriv, - "exp": funcExp, - "floor": funcFloor, - "histogram_count": funcHistogramCount, - "histogram_fraction": funcHistogramFraction, - "histogram_quantile": funcHistogramQuantile, - "histogram_sum": funcHistogramSum, - "holt_winters": funcHoltWinters, - "hour": funcHour, - "idelta": funcIdelta, - "increase": funcIncrease, - "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, - "ln": funcLn, - "log10": funcLog10, - "log2": funcLog2, - "last_over_time": funcLastOverTime, - "max_over_time": funcMaxOverTime, - "min_over_time": funcMinOverTime, - "minute": funcMinute, - "month": funcMonth, - "pi": funcPi, - "predict_linear": funcPredictLinear, - "present_over_time": funcPresentOverTime, - "quantile_over_time": funcQuantileOverTime, - "rad": funcRad, - "rate": funcRate, - "resets": funcResets, - "round": funcRound, - "scalar": funcScalar, - "sgn": funcSgn, - "sin": funcSin, - "sinh": funcSinh, - "sort": funcSort, - "sort_desc": funcSortDesc, - "sqrt": funcSqrt, - "stddev_over_time": funcStddevOverTime, - "stdvar_over_time": funcStdvarOverTime, - "sum_over_time": funcSumOverTime, - "tan": funcTan, - "tanh": funcTanh, - "time": funcTime, - "timestamp": funcTimestamp, - "vector": funcVector, - "year": funcYear, + 
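// Every non-nil entry in the rewritten map follows the widened
// FunctionCall contract
//
//	func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations)
//
// so a function can attach warnings and infos to its result instead of
// silently dropping problems (see funcHistogramQuantile above).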
"abs": funcAbs, + "absent": funcAbsent, + "absent_over_time": funcAbsentOverTime, + "acos": funcAcos, + "acosh": funcAcosh, + "asin": funcAsin, + "asinh": funcAsinh, + "atan": funcAtan, + "atanh": funcAtanh, + "avg_over_time": funcAvgOverTime, + "ceil": funcCeil, + "changes": funcChanges, + "clamp": funcClamp, + "clamp_max": funcClampMax, + "clamp_min": funcClampMin, + "cos": funcCos, + "cosh": funcCosh, + "count_over_time": funcCountOverTime, + "days_in_month": funcDaysInMonth, + "day_of_month": funcDayOfMonth, + "day_of_week": funcDayOfWeek, + "day_of_year": funcDayOfYear, + "deg": funcDeg, + "delta": funcDelta, + "deriv": funcDeriv, + "exp": funcExp, + "floor": funcFloor, + "histogram_avg": funcHistogramAvg, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, + "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, + "double_exponential_smoothing": funcDoubleExponentialSmoothing, + "hour": funcHour, + "idelta": funcIdelta, + "increase": funcIncrease, + "irate": funcIrate, + "label_replace": nil, // evalLabelReplace not called via this map. + "label_join": nil, // evalLabelJoin not called via this map. + "ln": funcLn, + "log10": funcLog10, + "log2": funcLog2, + "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, + "max_over_time": funcMaxOverTime, + "min_over_time": funcMinOverTime, + "minute": funcMinute, + "month": funcMonth, + "pi": funcPi, + "predict_linear": funcPredictLinear, + "present_over_time": funcPresentOverTime, + "quantile_over_time": funcQuantileOverTime, + "rad": funcRad, + "rate": funcRate, + "resets": funcResets, + "round": funcRound, + "scalar": funcScalar, + "sgn": funcSgn, + "sin": funcSin, + "sinh": funcSinh, + "sort": funcSort, + "sort_desc": funcSortDesc, + "sort_by_label": funcSortByLabel, + "sort_by_label_desc": funcSortByLabelDesc, + "sqrt": funcSqrt, + "stddev_over_time": funcStddevOverTime, + "stdvar_over_time": funcStdvarOverTime, + "sum_over_time": funcSumOverTime, + "tan": funcTan, + "tanh": funcTanh, + "time": funcTime, + "timestamp": funcTimestamp, + "vector": funcVector, + "year": funcYear, } // AtModifierUnsafeFunctions are the functions whose result @@ -1550,3 +1857,11 @@ func stringFromArg(e parser.Expr) string { unwrapParenExpr(&tmp) // Optionally unwrap ParenExpr return tmp.(*parser.StringLiteral).Val } + +func stringSliceFromArgs(args parser.Expressions) []string { + tmp := make([]string, len(args)) + for i := 0; i < len(args); i++ { + tmp[i] = stringFromArg(args[i]) + } + return tmp +} diff --git a/promql/functions_internal_test.go b/promql/functions_internal_test.go new file mode 100644 index 0000000000..85c6cd7973 --- /dev/null +++ b/promql/functions_internal_test.go @@ -0,0 +1,81 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promql + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestKahanSumInc(t *testing.T) { + testCases := map[string]struct { + first float64 + second float64 + expected float64 + }{ + "+Inf + anything = +Inf": { + first: math.Inf(1), + second: 2.0, + expected: math.Inf(1), + }, + "-Inf + anything = -Inf": { + first: math.Inf(-1), + second: 2.0, + expected: math.Inf(-1), + }, + "+Inf + -Inf = NaN": { + first: math.Inf(1), + second: math.Inf(-1), + expected: math.NaN(), + }, + "NaN + anything = NaN": { + first: math.NaN(), + second: 2, + expected: math.NaN(), + }, + "NaN + Inf = NaN": { + first: math.NaN(), + second: math.Inf(1), + expected: math.NaN(), + }, + "NaN + -Inf = NaN": { + first: math.NaN(), + second: math.Inf(-1), + expected: math.NaN(), + }, + } + + runTest := func(t *testing.T, a, b, expected float64) { + t.Run(fmt.Sprintf("%v + %v = %v", a, b, expected), func(t *testing.T) { + sum, c := kahanSumInc(b, a, 0) + result := sum + c + + if math.IsNaN(expected) { + require.Truef(t, math.IsNaN(result), "expected result to be NaN, but got %v (from %v + %v)", result, sum, c) + } else { + require.Equalf(t, expected, result, "expected result to be %v, but got %v (from %v + %v)", expected, result, sum, c) + } + }) + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + runTest(t, testCase.first, testCase.second, testCase.expected) + runTest(t, testCase.second, testCase.first, testCase.expected) + }) + } +} diff --git a/promql/functions_test.go b/promql/functions_test.go index faf6859e77..9ee0ba51dc 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -11,11 +11,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -package promql +package promql_test import ( "context" - "math" "testing" "time" @@ -23,7 +22,9 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/util/teststorage" ) @@ -33,13 +34,13 @@ func TestDeriv(t *testing.T) { // so we test it by hand. storage := teststorage.New(t) defer storage.Close() - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10000, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) a := storage.Appender(context.Background()) @@ -64,25 +65,19 @@ func TestDeriv(t *testing.T) { require.NoError(t, result.Err) vec, _ := result.Vector() - require.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec)) + require.Len(t, vec, 1, "Expected 1 result, got %d", len(vec)) require.Equal(t, 0.0, vec[0].F, "Expected 0.0 as value, got %f", vec[0].F) } func TestFunctionList(t *testing.T) { // Test that Functions and parser.Functions list the same functions. 
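// label_replace and label_join map to nil in FunctionCalls because they
// are dispatched through evalLabelReplace/evalLabelJoin on the evaluator
// rather than through this table; since the checks below only assert key
// presence in both maps, the nil entries keep the two listings in sync.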
- for i := range FunctionCalls { + for i := range promql.FunctionCalls { _, ok := parser.Functions[i] require.True(t, ok, "function %s exists in promql package, but not in parser package", i) } for i := range parser.Functions { - _, ok := FunctionCalls[i] + _, ok := promql.FunctionCalls[i] require.True(t, ok, "function %s exists in parser package, but not in promql package", i) } } - -func TestKahanSum(t *testing.T) { - vals := []float64{1.0, math.Pow(10, 100), 1.0, -1 * math.Pow(10, 100)} - expected := 2.0 - require.Equal(t, expected, kahanSum(vals)) -} diff --git a/promql/fuzz.go b/promql/fuzz.go index aff6eb15b2..57fd1166ac 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -13,7 +13,6 @@ // Only build when go-fuzz is in use //go:build gofuzz -// +build gofuzz package promql @@ -21,6 +20,7 @@ import ( "errors" "io" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/promql/parser" ) @@ -57,8 +57,11 @@ const ( maxInputSize = 10240 ) +// Use a package-scope symbol table to avoid memory allocation on every fuzzing operation. +var symbolTable = labels.NewSymbolTable() + func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType, false) + p, warning := textparse.New(in, contentType, false, false, symbolTable) if warning != nil { // An invalid content type is being passed, which should not happen // in this context. diff --git a/promql/fuzz_test.go b/promql/fuzz_test.go index 08a04d98b6..1f0bbaa662 100644 --- a/promql/fuzz_test.go +++ b/promql/fuzz_test.go @@ -13,7 +13,6 @@ // Only build when go-fuzz is in use //go:build gofuzz -// +build gofuzz package promql diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go new file mode 100644 index 0000000000..459d5924ae --- /dev/null +++ b/promql/histogram_stats_iterator.go @@ -0,0 +1,150 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +type histogramStatsIterator struct { + chunkenc.Iterator + + currentH *histogram.Histogram + lastH *histogram.Histogram + + currentFH *histogram.FloatHistogram + lastFH *histogram.FloatHistogram +} + +// NewHistogramStatsIterator creates an iterator which returns histogram objects +// which have only their sum and count values populated. The iterator handles +// counter reset detection internally and sets the counter reset hint accordingly +// in each returned histogram object. +func NewHistogramStatsIterator(it chunkenc.Iterator) chunkenc.Iterator { + return &histogramStatsIterator{ + Iterator: it, + currentH: &histogram.Histogram{}, + currentFH: &histogram.FloatHistogram{}, + } +} + +// AtHistogram returns the next timestamp/histogram pair.
The counter reset +// detection is guaranteed to be correct only when the caller does not switch +// between AtHistogram and AtFloatHistogram calls. +func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { + var t int64 + t, f.currentH = f.Iterator.AtHistogram(f.currentH) + if value.IsStaleNaN(f.currentH.Sum) { + h = &histogram.Histogram{Sum: f.currentH.Sum} + return t, h + } + + if h == nil { + h = &histogram.Histogram{ + CounterResetHint: f.getResetHint(f.currentH), + Count: f.currentH.Count, + Sum: f.currentH.Sum, + } + f.setLastH(f.currentH) + return t, h + } + + returnValue := histogram.Histogram{ + CounterResetHint: f.getResetHint(f.currentH), + Count: f.currentH.Count, + Sum: f.currentH.Sum, + } + returnValue.CopyTo(h) + + f.setLastH(f.currentH) + return t, h +} + +// AtFloatHistogram returns the next timestamp/float histogram pair. The counter +// reset detection is guaranteed to be correct only when the caller does not +// switch between AtHistogram and AtFloatHistogram calls. +func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + var t int64 + t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH) + if value.IsStaleNaN(f.currentFH.Sum) { + return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum} + } + + if fh == nil { + fh = &histogram.FloatHistogram{ + CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint), + Count: f.currentFH.Count, + Sum: f.currentFH.Sum, + } + f.setLastFH(f.currentFH) + return t, fh + } + + returnValue := histogram.FloatHistogram{ + CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint), + Count: f.currentFH.Count, + Sum: f.currentFH.Sum, + } + returnValue.CopyTo(fh) + + f.setLastFH(f.currentFH) + return t, fh +} + +func (f *histogramStatsIterator) setLastH(h *histogram.Histogram) { + if f.lastH == nil { + f.lastH = h.Copy() + } else { + h.CopyTo(f.lastH) + } +} + +func (f *histogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) { + if f.lastFH == nil { + f.lastFH = fh.Copy() + } else { + fh.CopyTo(f.lastFH) + } +} + +func (f *histogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint { + if hint != histogram.UnknownCounterReset { + return hint + } + if f.lastFH == nil { + return histogram.NotCounterReset + } + + if f.currentFH.DetectReset(f.lastFH) { + return histogram.CounterReset + } + return histogram.NotCounterReset +} + +func (f *histogramStatsIterator) getResetHint(h *histogram.Histogram) histogram.CounterResetHint { + if h.CounterResetHint != histogram.UnknownCounterReset { + return h.CounterResetHint + } + if f.lastH == nil { + return histogram.NotCounterReset + } + + fh, prevFH := h.ToFloat(nil), f.lastH.ToFloat(nil) + if fh.DetectReset(prevFH) { + return histogram.CounterReset + } + return histogram.NotCounterReset +} diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go new file mode 100644 index 0000000000..7a2953d3e2 --- /dev/null +++ b/promql/histogram_stats_iterator_test.go @@ -0,0 +1,191 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" +) + +func TestHistogramStatsDecoding(t *testing.T) { + cases := []struct { + name string + histograms []*histogram.Histogram + expectedHints []histogram.CounterResetHint + }{ + { + name: "unknown counter reset triggers detection", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + tsdbutil.GenerateTestHistogramWithHint(2, histogram.CounterReset), + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.NotCounterReset, + histogram.CounterReset, + histogram.NotCounterReset, + }, + }, + { + name: "stale sample before unknown reset hint", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + {Sum: math.Float64frombits(value.StaleNaN)}, + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.NotCounterReset, + histogram.UnknownCounterReset, + histogram.NotCounterReset, + }, + }, + { + name: "unknown counter reset at the beginning", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + }, + }, + { + name: "detect real counter reset", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.CounterReset, + }, + }, + { + name: "detect real counter reset after stale NaN", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + {Sum: math.Float64frombits(value.StaleNaN)}, + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.UnknownCounterReset, + histogram.CounterReset, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Run("histogram_stats", func(t *testing.T) { + decodedStats := make([]*histogram.Histogram, 0) + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); 
i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i)) + h := tc.histograms[i] + if value.IsStaleNaN(h.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, uint64(0), decodedStats[i].Count) + } else { + require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count) + require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum) + } + } + }) + t.Run("float_histogram_stats", func(t *testing.T) { + decodedStats := make([]*histogram.FloatHistogram, 0) + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtFloatHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint) + fh := tc.histograms[i].ToFloat(nil) + if value.IsStaleNaN(fh.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, float64(0), decodedStats[i].Count) + } else { + require.Equal(t, fh.Count, decodedStats[i].Count) + require.Equal(t, fh.Sum, decodedStats[i].Sum) + } + } + }) + }) + } +} + +type histogramSeries struct { + histograms []*histogram.Histogram +} + +func newHistogramSeries(histograms []*histogram.Histogram) *histogramSeries { + return &histogramSeries{ + histograms: histograms, + } +} + +func (m histogramSeries) Labels() labels.Labels { return labels.EmptyLabels() } + +func (m histogramSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { + return &histogramIterator{ + i: -1, + histograms: m.histograms, + } +} + +type histogramIterator struct { + i int + histograms []*histogram.Histogram +} + +func (h *histogramIterator) Next() chunkenc.ValueType { + h.i++ + if h.i < len(h.histograms) { + return chunkenc.ValHistogram + } + return chunkenc.ValNone +} + +func (h *histogramIterator) Seek(t int64) chunkenc.ValueType { panic("not implemented") } + +func (h *histogramIterator) At() (int64, float64) { panic("not implemented") } + +func (h *histogramIterator) AtHistogram(_ *histogram.Histogram) (int64, *histogram.Histogram) { + return 0, h.histograms[h.i] +} + +func (h *histogramIterator) AtFloatHistogram(_ *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + return 0, h.histograms[h.i].ToFloat(nil) +} + +func (h *histogramIterator) AtT() int64 { return 0 } + +func (h *histogramIterator) Err() error { return nil } diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 86f1394998..162d7817ab 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -20,6 +20,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + + "github.com/prometheus/prometheus/promql/parser/posrange" ) // Node is a generic interface for all nodes in an AST. @@ -45,7 +47,7 @@ type Node interface { Pretty(level int) string // PositionRange returns the position of the AST Node in the query string. - PositionRange() PositionRange + PositionRange() posrange.PositionRange } // Statement is a generic interface for all statements. @@ -53,7 +55,6 @@ type Statement interface { Node // PromQLStmt ensures that no other type accidentally implements the interface - // nolint:unused PromQLStmt() } @@ -94,7 +95,7 @@ type AggregateExpr struct { Param Expr // Parameter used by some aggregators. Grouping []string // The labels by which to group the Vector. 
Without bool // Whether to drop the given labels rather than keep them. - PosRange PositionRange + PosRange posrange.PositionRange } // BinaryExpr represents a binary expression between two child expressions. @@ -115,7 +116,7 @@ type Call struct { Func *Function // The function that was called. Args Expressions // Arguments used in the call. - PosRange PositionRange + PosRange posrange.PositionRange } // MatrixSelector represents a Matrix selection. @@ -125,7 +126,7 @@ type MatrixSelector struct { VectorSelector Expr Range time.Duration - EndPos Pos + EndPos posrange.Pos } // SubqueryExpr represents a subquery. @@ -143,27 +144,27 @@ type SubqueryExpr struct { StartOrEnd ItemType // Set when @ is used with start() or end() Step time.Duration - EndPos Pos + EndPos posrange.Pos } // NumberLiteral represents a number. type NumberLiteral struct { Val float64 - PosRange PositionRange + PosRange posrange.PositionRange } // ParenExpr wraps an expression so it cannot be disassembled as a consequence // of operator precedence. type ParenExpr struct { Expr Expr - PosRange PositionRange + PosRange posrange.PositionRange } // StringLiteral represents a string. type StringLiteral struct { Val string - PosRange PositionRange + PosRange posrange.PositionRange } // UnaryExpr represents a unary operation on another expression. @@ -172,7 +173,7 @@ type UnaryExpr struct { Op ItemType Expr Expr - StartPos Pos + StartPos posrange.Pos } // StepInvariantExpr represents a query which evaluates to the same result @@ -184,7 +185,9 @@ type StepInvariantExpr struct { func (e *StepInvariantExpr) String() string { return e.Expr.String() } -func (e *StepInvariantExpr) PositionRange() PositionRange { return e.Expr.PositionRange() } +func (e *StepInvariantExpr) PositionRange() posrange.PositionRange { + return e.Expr.PositionRange() +} // VectorSelector represents a Vector selection. type VectorSelector struct { @@ -195,16 +198,17 @@ type VectorSelector struct { // Offset is the offset used during the query execution // which is calculated using the original offset, at modifier time, // eval time, and subquery offsets in the AST tree. - Offset time.Duration - Timestamp *int64 - StartOrEnd ItemType // Set when @ is used with start() or end() - LabelMatchers []*labels.Matcher + Offset time.Duration + Timestamp *int64 + SkipHistogramBuckets bool // Set when decoding native histogram buckets is not needed for query evaluation. + StartOrEnd ItemType // Set when @ is used with start() or end() + LabelMatchers []*labels.Matcher // The unexpanded seriesSet populated at query preparation time. UnexpandedSeriesSet storage.SeriesSet Series []storage.Series - PosRange PositionRange + PosRange posrange.PositionRange } // TestStmt is an internal helper statement that allows execution @@ -215,8 +219,8 @@ func (TestStmt) String() string { return "test statement" } func (TestStmt) PromQLStmt() {} func (t TestStmt) Pretty(int) string { return t.String() } -func (TestStmt) PositionRange() PositionRange { - return PositionRange{ +func (TestStmt) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: -1, End: -1, } @@ -348,8 +352,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) { // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f // for all the non-nil children of node, recursively. 
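// A typical use from outside the package walks a parsed expression and
// collects the nodes of interest, e.g. every vector selector (a sketch,
// assuming expr holds a parsed parser.Expr):
//
//	var selectors []*parser.VectorSelector
//	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
//		if vs, ok := node.(*parser.VectorSelector); ok {
//			selectors = append(selectors, vs)
//		}
//		return nil
//	})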
func Inspect(node Node, f inspector) { - //nolint: errcheck - Walk(f, node, nil) + Walk(f, node, nil) //nolint:errcheck } // Children returns a list of all child nodes of a syntax tree node. @@ -405,51 +408,45 @@ func Children(node Node) []Node { } } -// PositionRange describes a position in the input string of the parser. -type PositionRange struct { - Start Pos - End Pos -} - // mergeRanges is a helper function to merge the PositionRanges of two Nodes. // Note that the arguments must be in the same order as they // occur in the input string. -func mergeRanges(first, last Node) PositionRange { - return PositionRange{ +func mergeRanges(first, last Node) posrange.PositionRange { + return posrange.PositionRange{ Start: first.PositionRange().Start, End: last.PositionRange().End, } } -// Item implements the Node interface. +// PositionRange implements the Node interface. // This makes it possible to call mergeRanges on them. -func (i *Item) PositionRange() PositionRange { - return PositionRange{ +func (i *Item) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: i.Pos, - End: i.Pos + Pos(len(i.Val)), + End: i.Pos + posrange.Pos(len(i.Val)), } } -func (e *AggregateExpr) PositionRange() PositionRange { +func (e *AggregateExpr) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *BinaryExpr) PositionRange() PositionRange { +func (e *BinaryExpr) PositionRange() posrange.PositionRange { return mergeRanges(e.LHS, e.RHS) } -func (e *Call) PositionRange() PositionRange { +func (e *Call) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *EvalStmt) PositionRange() PositionRange { +func (e *EvalStmt) PositionRange() posrange.PositionRange { return e.Expr.PositionRange() } -func (e Expressions) PositionRange() PositionRange { +func (e Expressions) PositionRange() posrange.PositionRange { if len(e) == 0 { // Position undefined. 
- return PositionRange{ + return posrange.PositionRange{ Start: -1, End: -1, } @@ -457,39 +454,39 @@ func (e Expressions) PositionRange() PositionRange { return mergeRanges(e[0], e[len(e)-1]) } -func (e *MatrixSelector) PositionRange() PositionRange { - return PositionRange{ +func (e *MatrixSelector) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: e.VectorSelector.PositionRange().Start, End: e.EndPos, } } -func (e *SubqueryExpr) PositionRange() PositionRange { - return PositionRange{ +func (e *SubqueryExpr) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: e.Expr.PositionRange().Start, End: e.EndPos, } } -func (e *NumberLiteral) PositionRange() PositionRange { +func (e *NumberLiteral) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *ParenExpr) PositionRange() PositionRange { +func (e *ParenExpr) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *StringLiteral) PositionRange() PositionRange { +func (e *StringLiteral) PositionRange() posrange.PositionRange { return e.PosRange } -func (e *UnaryExpr) PositionRange() PositionRange { - return PositionRange{ +func (e *UnaryExpr) PositionRange() posrange.PositionRange { + return posrange.PositionRange{ Start: e.StartPos, End: e.Expr.PositionRange().End, } } -func (e *VectorSelector) PositionRange() PositionRange { +func (e *VectorSelector) PositionRange() posrange.PositionRange { return e.PosRange } diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 479c7f635d..9d7b560537 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -16,12 +16,16 @@ package parser // Function represents a function of the expression language and is // used by function nodes. type Function struct { - Name string - ArgTypes []ValueType - Variadic int - ReturnType ValueType + Name string + ArgTypes []ValueType + Variadic int + ReturnType ValueType + Experimental bool } +// EnableExperimentalFunctions controls whether experimentalFunctions are enabled. +var EnableExperimentalFunctions bool + // Functions is a list of all functions supported by PromQL, including their types. 
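// Entries marked Experimental are only accepted when
// EnableExperimentalFunctions is true; otherwise the parser rejects them.
// A minimal sketch of the gate (hypothetical helper; the real check lives
// in the parser):
//
//	func lookup(name string) (*Function, bool) {
//		f, ok := Functions[name]
//		if !ok || (f.Experimental && !EnableExperimentalFunctions) {
//			return nil, false
//		}
//		return f, true
//	}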
var Functions = map[string]*Function{ "abs": { @@ -163,6 +167,11 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_avg": { + Name: "histogram_avg", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_count": { Name: "histogram_count", ArgTypes: []ValueType{ValueTypeVector}, @@ -173,6 +182,16 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_stddev": { + Name: "histogram_stddev", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_stdvar": { + Name: "histogram_stdvar", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_fraction": { Name: "histogram_fraction", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, @@ -183,10 +202,11 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, ReturnType: ValueTypeVector, }, - "holt_winters": { - Name: "holt_winters", - ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, - ReturnType: ValueTypeVector, + "double_exponential_smoothing": { + Name: "double_exponential_smoothing", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Experimental: true, }, "hour": { Name: "hour", @@ -240,6 +260,12 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "mad_over_time": { + Name: "mad_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Experimental: true, + }, "max_over_time": { Name: "max_over_time", ArgTypes: []ValueType{ValueTypeMatrix}, @@ -333,6 +359,20 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "sort_by_label": { + Name: "sort_by_label", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString}, + Variadic: -1, + ReturnType: ValueTypeVector, + Experimental: true, + }, + "sort_by_label_desc": { + Name: "sort_by_label_desc", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString}, + Variadic: -1, + ReturnType: ValueTypeVector, + Experimental: true, + }, "sqrt": { Name: "sqrt", ArgTypes: []ValueType{ValueTypeVector}, diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index b28e9d544c..befb9bdf3e 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -21,23 +21,30 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/promql/parser/posrange" + + "github.com/prometheus/common/model" ) %} %union { - node Node - item Item - matchers []*labels.Matcher - matcher *labels.Matcher - label labels.Label - labels labels.Labels - lblList []labels.Label - strings []string - series []SequenceValue - uint uint64 - float float64 - duration time.Duration + node Node + item Item + matchers []*labels.Matcher + matcher *labels.Matcher + label labels.Label + labels labels.Labels + lblList []labels.Label + strings []string + series []SequenceValue + histogram *histogram.FloatHistogram + descriptors map[string]interface{} + bucket_set []float64 + int int64 + uint uint64 + float float64 } @@ -54,6 +61,8 @@ IDENTIFIER LEFT_BRACE LEFT_BRACKET LEFT_PAREN +OPEN_HIST +CLOSE_HIST METRIC_IDENTIFIER NUMBER RIGHT_BRACE @@ 
-64,6 +73,22 @@ SPACE STRING TIMES +// Histogram Descriptors. +%token histogramDescStart +%token +SUM_DESC +COUNT_DESC +SCHEMA_DESC +OFFSET_DESC +NEGATIVE_OFFSET_DESC +BUCKETS_DESC +NEGATIVE_BUCKETS_DESC +ZERO_BUCKET_DESC +ZERO_BUCKET_WIDTH_DESC +CUSTOM_VALUES_DESC +COUNTER_RESET_HINT_DESC +%token histogramDescEnd + // Operators. %token operatorsStart %token @@ -103,6 +128,8 @@ STDDEV STDVAR SUM TOPK +LIMITK +LIMIT_RATIO %token aggregatorsEnd // Keywords. @@ -125,6 +152,14 @@ START END %token preprocessorEnd +// Counter reset hints. +%token counterResetHintsStart +%token +UNKNOWN_COUNTER_RESET +COUNTER_RESET +NOT_COUNTER_RESET +GAUGE_TYPE +%token counterResetHintsEnd // Start symbols for the generated parser. %token startSymbolsStart @@ -139,16 +174,19 @@ START_METRIC_SELECTOR // Type definitions for grammar rules. %type label_match_list %type label_matcher -%type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors +%type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint %type label_set metric %type label_set_list %type