From 9ddb21fccb829d5a0c01492901ffdc912b2eeb4b Mon Sep 17 00:00:00 2001 From: jingchanglu Date: Thu, 10 Jul 2025 14:43:25 +0800 Subject: [PATCH 01/89] chore: fix some function names in comment Signed-off-by: jingchanglu --- model/histogram/float_histogram.go | 2 +- web/api/v1/json_codec.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 92f084bdf6..55ffbc08b3 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -975,7 +975,7 @@ func (h *FloatHistogram) floatBucketIterator( return i } -// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. +// newReverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. func newReverseFloatBucketIterator( spans []Span, buckets []float64, schema int32, positive bool, customValues []float64, ) reverseFloatBucketIterator { diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go index 6bd095a8f3..8dcfd8a062 100644 --- a/web/api/v1/json_codec.go +++ b/web/api/v1/json_codec.go @@ -51,7 +51,7 @@ func (j JSONCodec) Encode(resp *Response) ([]byte, error) { return json.Marshal(resp) } -// marshalSeriesJSON writes something like the following: +// unsafeMarshalSeriesJSON writes something like the following: // // { // "metric" : { @@ -108,7 +108,7 @@ func neverEmpty(unsafe.Pointer) bool { return false } -// marshalSampleJSON writes something like the following for normal value samples: +// unsafeMarshalSampleJSON writes something like the following for normal value samples: // // { // "metric" : { From 83846b57386cce5e783b69c7938ac771e12df483 Mon Sep 17 00:00:00 2001 From: xander Date: Wed, 6 Aug 2025 15:28:48 +0100 Subject: [PATCH 02/89] chore: update fsnotify Signed-off-by: xander --- go.mod | 5 +---- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index e993819936..49f5487afd 100644 --- 
a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/envoyproxy/go-control-plane/envoy v1.32.4 github.com/envoyproxy/protoc-gen-validate v1.2.1 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/fsnotify/fsnotify v1.8.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/go-openapi/strfmt v0.23.0 github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf v1.3.2 @@ -246,6 +246,3 @@ exclude ( github.com/grpc-ecosystem/grpc-gateway v1.14.7 google.golang.org/api v0.30.0 ) - -// Pin until https://github.com/fsnotify/fsnotify/issues/656 is resolved. -replace github.com/fsnotify/fsnotify v1.8.0 => github.com/fsnotify/fsnotify v1.7.0 diff --git a/go.sum b/go.sum index 14a9b22ff1..d61c5e9416 100644 --- a/go.sum +++ b/go.sum @@ -139,8 +139,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= From 8e48f43e0662e54c2c3cfb712502fe43486c353b Mon Sep 17 00:00:00 2001 From: cuiweixie Date: Thu, 21 Aug 2025 13:38:50 +0800 Subject: [PATCH 03/89] discovery: refactor to use reflect.TypeFor Signed-off-by: cuiweixie --- discovery/registry.go | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/discovery/registry.go b/discovery/registry.go index 92fa3d3d16..ae32cb7c0a 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -42,8 +42,8 @@ var ( configTypesMu sync.Mutex configTypes = make(map[reflect.Type]reflect.Type) - emptyStructType = reflect.TypeOf(struct{}{}) - configsType = reflect.TypeOf(Configs{}) + emptyStructType = reflect.TypeFor[struct{}]() + configsType = reflect.TypeFor[Configs]() ) // RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. @@ -54,7 +54,7 @@ func RegisterConfig(config Config) { func init() { // N.B.: static_configs is the only Config type implemented by default. // All other types are registered at init by their implementing packages. - elemTyp := reflect.TypeOf(&targetgroup.Group{}) + elemTyp := reflect.TypeFor[*targetgroup.Group]() registerConfig(staticConfigsKey, elemTyp, StaticConfig{}) } From 157ed00d9d7be4b74c3a59191e13fbc43b0c15e3 Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 4 Aug 2025 11:46:25 +0200 Subject: [PATCH 04/89] chore: prepare release 3.6.0-rc.0 Signed-off-by: machine424 --- CHANGELOG.md | 25 +++++++++++++++++++- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++------ web/ui/package.json | 2 +- 7 files changed, 38 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index adb6b0fe0f..9cfa22604f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,30 @@ ## main / unreleased -* [BUGFIX] OTLP receiver: Generate `target_info` samples between the earliest and latest samples per resource. #16737 +## 3.6.0-rc.0 / 2025-08-12 + +* [FEATURE] PromQL: Add `step()`, `min()` and `max()` behind feature flag `promql-duration-expr`. #16777 +* [FEATURE] API: Add a `/v1/status/tsdb/blocks` endpoint exposing metadata about loaded blocks. 
#16695 +* [FEATURE] Templates: Add `toDuration()` and `now()` functions. #16619 +* [ENHANCEMENT] Discovery: Add support for attaching namespace metadata to targets. #16831 +* [ENHANCEMENT] OTLP: Support new `UnderscoreEscapingWithoutSuffixes` strategy via `otlp.translation_strategy`. #16849 +* [ENHANCEMENT] OTLP: Support including scope metadata as metric labels via `otlp.promote_scope_metadata`. #16878 +* [ENHANCEMENT] OTLP: Add `__type__` and `__unit__` labels when feature flag `type-and-unit-labels` is enabled. #16630 +* [ENHANCEMENT] Tracing: Send the traceparent HTTP header during scrapes. #16425 +* [ENHANCEMENT] UI: Add option to disable info and warning query messages under `Query page settings`. #16901 +* [ENHANCEMENT] UI: Improve metadata handling for `_count/_sum/_bucket` suffixes. #16910 +* [PERF] PromQL: Improve performance due to internal optimizations. #16797 +* [BUGFIX] Config: Fix `metric_name_escaping_scheme` related error messages produced during validation. #16801 +* [BUGFIX] Discovery: Fix race condition during shutdown. #16820 +* [BUGFIX] OTLP: Generate `target_info` samples between the earliest and latest samples per resource. #16737 +* [BUGFIX] PromQL: Fail when `NaN` is passed as parameter to `topk()`, `bottomk()`, `limitk()` and `limit_ratio()`. #16725 +* [BUGFIX] PromQL: Fix extrapolation for native counter histograms. #16828 +* [BUGFIX] PromQL: Reduce numerical errors by disabling some optimizations. #16895 +* [BUGFIX] PromQL: Fix inconsistencies when using native histograms in subqueries. #16879 +* [BUGFIX] PromQL: Fix inconsistent annotations for `rate()` and `increase()` on histograms when feature flag `type-and-unit-labels` is enabled. #16915 +* [BUGFIX] Scraping: Fix memory corruption in `slicelabels` builds. #16946 +* [BUGFIX] TSDB: Fix panic on append when feature flag `created-timestamp-zero-ingestion` is enabled. #16318 +* [BUGFIX] TSDB: Fix panic on append for native histograms with empty buckets. 
#16893 ## 3.5.0 / 2025-07-14 diff --git a/VERSION b/VERSION index 1545d96657..0b3d28b729 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.5.0 +3.6.0-rc.0 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 53a6e79612..404f1db99a 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.305.0", + "version": "0.306.0-rc.0", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.305.0", + "@prometheus-io/codemirror-promql": "0.306.0-rc.0", "@reduxjs/toolkit": "^2.7.0", "@tabler/icons-react": "^3.31.0", "@tanstack/react-query": "^5.74.7", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index bc1d5f0ccf..7619365062 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.305.0", + "version": "0.306.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.305.0", + "@prometheus-io/lezer-promql": "0.306.0-rc.0", "lru-cache": "^11.1.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 1dedee5be5..a3605833af 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.305.0", + "version": "0.306.0-rc.0", "description": "lezer-based PromQL grammar", "main": 
"dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f1140e0915..f9f210d945 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.305.0", + "version": "0.306.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.305.0", + "version": "0.306.0-rc.0", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.305.0", + "version": "0.306.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.18.6", "@codemirror/language": "^6.11.0", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.305.0", + "@prometheus-io/codemirror-promql": "0.306.0-rc.0", "@reduxjs/toolkit": "^2.7.0", "@tabler/icons-react": "^3.31.0", "@tanstack/react-query": "^5.74.7", @@ -189,10 +189,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.305.0", + "version": "0.306.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.305.0", + "@prometheus-io/lezer-promql": "0.306.0-rc.0", "lru-cache": "^11.1.0" }, "devDependencies": { @@ -222,7 +222,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.305.0", + "version": "0.306.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.3", diff --git a/web/ui/package.json b/web/ui/package.json index d5a6634c36..a5a1190db6 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.305.0", + "version": "0.306.0-rc.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", From 94b4c49a769d85e385316b6a670d312d898cdec5 Mon Sep 17 00:00:00 2001 From: 
machine424 Date: Wed, 13 Aug 2025 12:15:49 +0200 Subject: [PATCH 05/89] apply bboreham's suggestions Signed-off-by: machine424 --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cfa22604f..a9a0ca96a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ ## 3.6.0-rc.0 / 2025-08-12 -* [FEATURE] PromQL: Add `step()`, `min()` and `max()` behind feature flag `promql-duration-expr`. #16777 +* [FEATURE] PromQL: Add `step()`, and `min()` and `max()` on durations, behind feature flag `promql-duration-expr`. #16777 * [FEATURE] API: Add a `/v1/status/tsdb/blocks` endpoint exposing metadata about loaded blocks. #16695 * [FEATURE] Templates: Add `toDuration()` and `now()` functions. #16619 * [ENHANCEMENT] Discovery: Add support for attaching namespace metadata to targets. #16831 @@ -15,7 +15,7 @@ * [ENHANCEMENT] UI: Add option to disable info and warning query messages under `Query page settings`. #16901 * [ENHANCEMENT] UI: Improve metadata handling for `_count/_sum/_bucket` suffixes. #16910 * [PERF] PromQL: Improve performance due to internal optimizations. #16797 -* [BUGFIX] Config: Fix `metric_name_escaping_scheme` related error messages produced during validation. #16801 +* [BUGFIX] Config: Fix "unknown global name escaping method" error messages produced during config validation. #16801 * [BUGFIX] Discovery: Fix race condition during shutdown. #16820 * [BUGFIX] OTLP: Generate `target_info` samples between the earliest and latest samples per resource. #16737 * [BUGFIX] PromQL: Fail when `NaN` is passed as parameter to `topk()`, `bottomk()`, `limitk()` and `limit_ratio()`. 
#16725 From 9855613435f551d0f170d3cf3c85b4f766897550 Mon Sep 17 00:00:00 2001 From: machine424 Date: Wed, 13 Aug 2025 12:38:14 +0200 Subject: [PATCH 06/89] fix PR number Signed-off-by: machine424 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9a0ca96a4..064d8b2482 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ * [BUGFIX] PromQL: Fix inconsistencies when using native histograms in subqueries. #16879 * [BUGFIX] PromQL: Fix inconsistent annotations for `rate()` and `increase()` on histograms when feature flag `type-and-unit-labels` is enabled. #16915 * [BUGFIX] Scraping: Fix memory corruption in `slicelabels` builds. #16946 -* [BUGFIX] TSDB: Fix panic on append when feature flag `created-timestamp-zero-ingestion` is enabled. #16318 +* [BUGFIX] TSDB: Fix panic on append when feature flag `created-timestamp-zero-ingestion` is enabled. #16332 * [BUGFIX] TSDB: Fix panic on append for native histograms with empty buckets. 
#16893 ## 3.5.0 / 2025-07-14 From 4a37fd886f789128b6c4ecff02df821c3aceecf4 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 24 Jul 2025 17:38:18 -0700 Subject: [PATCH 07/89] Track stale series in the Head Signed-off-by: Ganesh Vernekar --- tsdb/head.go | 27 ++++++++++++++++++++++++--- tsdb/head_append.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 3 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index 7763d272b7..574305a287 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -36,6 +36,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -68,6 +69,7 @@ var ( type Head struct { chunkRange atomic.Int64 numSeries atomic.Uint64 + numStaleSeries atomic.Uint64 minOOOTime, maxOOOTime atomic.Int64 // TODO(jesusvazquez) These should be updated after garbage collection. minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. TODO(jesusvazquez) Ensure these are properly tracked. minValidTime atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block. 
@@ -360,6 +362,7 @@ func (h *Head) resetWLReplayResources() { type headMetrics struct { activeAppenders prometheus.Gauge series prometheus.GaugeFunc + staleSeries prometheus.GaugeFunc seriesCreated prometheus.Counter seriesRemoved prometheus.Counter seriesNotFound prometheus.Counter @@ -406,6 +409,12 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }, func() float64 { return float64(h.NumSeries()) }), + staleSeries: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_head_stale_series", + Help: "Total number of stale series in the head block.", + }, func() float64 { + return float64(h.NumStaleSeries()) + }), seriesCreated: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_series_created_total", Help: "Total number of series created in the head", @@ -532,6 +541,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { r.MustRegister( m.activeAppenders, m.series, + m.staleSeries, m.chunks, m.chunksCreated, m.chunksRemoved, @@ -1607,7 +1617,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { // Drop old chunks and remember series IDs and hashes if they can be // deleted entirely. - deleted, affected, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef) + deleted, affected, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef, &h.numStaleSeries) seriesRemoved := len(deleted) h.metrics.seriesRemoved.Add(float64(seriesRemoved)) @@ -1645,11 +1655,16 @@ func (h *Head) Tombstones() (tombstones.Reader, error) { return h.tombstones, nil } -// NumSeries returns the number of active series in the head. +// NumSeries returns the number of series tracked in the head. func (h *Head) NumSeries() uint64 { return h.numSeries.Load() } +// NumStaleSeries returns the number of stale series in the head. 
+func (h *Head) NumStaleSeries() uint64 { + return h.numStaleSeries.Load() +} + var headULID = ulid.MustParse("0000000000XXXXXXXXXXXXHEAD") // Meta returns meta information about the head. @@ -1929,7 +1944,7 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct // and there's no easy way to cast maps. // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. -func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int, _, _ int64, minMmapFile int) { +func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef, numStaleSeries *atomic.Uint64) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int, _, _ int64, minMmapFile int) { var ( deleted = map[storage.SeriesRef]struct{}{} affected = map[labels.Label]struct{}{} @@ -1987,6 +2002,12 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( defer s.locks[refShard].Unlock() } + if value.IsStaleNaN(series.lastValue) || + (series.lastHistogramValue != nil && value.IsStaleNaN(series.lastHistogramValue.Sum)) || + (series.lastFloatHistogramValue != nil && value.IsStaleNaN(series.lastFloatHistogramValue.Sum)) { + numStaleSeries.Dec() + } + deleted[storage.SeriesRef(series.ref)] = struct{}{} series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} }) s.hashes[hashShard].del(hash, series.ref) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 05299f048d..fa44f752f2 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1222,6 +1222,8 @@ func (a *headAppender) commitSamples(acc *appenderCommitContext) { acc.floatsAppended-- } default: + newlyStale := !value.IsStaleNaN(series.lastValue) && value.IsStaleNaN(s.V) + staleToNonStale := value.IsStaleNaN(series.lastValue) && 
!value.IsStaleNaN(s.V) ok, chunkCreated = series.append(s.T, s.V, a.appendID, acc.appendChunkOpts) if ok { if s.T < acc.inOrderMint { @@ -1230,6 +1232,12 @@ func (a *headAppender) commitSamples(acc *appenderCommitContext) { if s.T > acc.inOrderMaxt { acc.inOrderMaxt = s.T } + if newlyStale { + a.head.numStaleSeries.Inc() + } + if staleToNonStale { + a.head.numStaleSeries.Dec() + } } else { // The sample is an exact duplicate, and should be silently dropped. acc.floatsAppended-- @@ -1310,6 +1318,12 @@ func (a *headAppender) commitHistograms(acc *appenderCommitContext) { acc.histogramsAppended-- } default: + newlyStale := value.IsStaleNaN(s.H.Sum) + staleToNonStale := false + if series.lastHistogramValue != nil { + newlyStale = newlyStale && !value.IsStaleNaN(series.lastHistogramValue.Sum) + staleToNonStale = value.IsStaleNaN(series.lastHistogramValue.Sum) && !value.IsStaleNaN(s.H.Sum) + } ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, acc.appendChunkOpts) if ok { if s.T < acc.inOrderMint { @@ -1318,6 +1332,12 @@ func (a *headAppender) commitHistograms(acc *appenderCommitContext) { if s.T > acc.inOrderMaxt { acc.inOrderMaxt = s.T } + if newlyStale { + a.head.numStaleSeries.Inc() + } + if staleToNonStale { + a.head.numStaleSeries.Dec() + } } else { acc.histogramsAppended-- acc.histoOOORejected++ @@ -1398,6 +1418,12 @@ func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { acc.histogramsAppended-- } default: + newlyStale := value.IsStaleNaN(s.FH.Sum) + staleToNonStale := false + if series.lastFloatHistogramValue != nil { + newlyStale = newlyStale && !value.IsStaleNaN(series.lastFloatHistogramValue.Sum) + staleToNonStale = value.IsStaleNaN(series.lastFloatHistogramValue.Sum) && !value.IsStaleNaN(s.FH.Sum) + } ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, acc.appendChunkOpts) if ok { if s.T < acc.inOrderMint { @@ -1406,6 +1432,12 @@ func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { if 
s.T > acc.inOrderMaxt { acc.inOrderMaxt = s.T } + if newlyStale { + a.head.numStaleSeries.Inc() + } + if staleToNonStale { + a.head.numStaleSeries.Dec() + } } else { acc.histogramsAppended-- acc.histoOOORejected++ From 787fe92e8621b259e828a59eb0794d81973f3923 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 24 Jul 2025 17:38:50 -0700 Subject: [PATCH 08/89] Test the stale series tracking in Head Signed-off-by: Ganesh Vernekar --- tsdb/head_test.go | 129 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 128 insertions(+), 1 deletion(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 73f67f4e8a..b1856c0419 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6435,7 +6435,7 @@ func TestStripeSeries_gc(t *testing.T) { s, ms1, ms2 := stripeSeriesWithCollidingSeries(t) hash := ms1.lset.Hash() - s.gc(0, 0) + s.gc(0, 0, nil) // Verify that we can get neither ms1 nor ms2 after gc-ing corresponding series got := s.getByHash(hash, ms1.lset) @@ -6866,3 +6866,130 @@ func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(sto wg.Wait() } + +func TestHead_NumStaleSeries(t *testing.T) { + head, _ := newTestHead(t, 1000, compression.None, false) + t.Cleanup(func() { + require.NoError(t, head.Close()) + }) + require.NoError(t, head.Init(0)) + + // Initially, no series should be stale. 
+ require.Equal(t, uint64(0), head.NumStaleSeries()) + + appendSample := func(lbls labels.Labels, ts int64, val float64) { + app := head.Appender(context.Background()) + _, err := app.Append(0, lbls, ts, val) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + appendHistogram := func(lbls labels.Labels, ts int64, val *histogram.Histogram) { + app := head.Appender(context.Background()) + _, err := app.AppendHistogram(0, lbls, ts, val, nil) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + appendFloatHistogram := func(lbls labels.Labels, ts int64, val *histogram.FloatHistogram) { + app := head.Appender(context.Background()) + _, err := app.AppendHistogram(0, lbls, ts, nil, val) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + verifySeriesCounts := func(numStaleSeries, numSeries int) { + require.Equal(t, uint64(numStaleSeries), head.NumStaleSeries()) + require.Equal(t, uint64(numSeries), head.NumSeries()) + } + + // Create some series with normal samples. + series1 := labels.FromStrings("name", "series1", "label", "value1") + series2 := labels.FromStrings("name", "series2", "label", "value2") + series3 := labels.FromStrings("name", "series3", "label", "value3") + + // Add normal samples to all series. + appendSample(series1, 100, 1) + appendSample(series2, 100, 2) + appendSample(series3, 100, 3) + // Still no stale series. + verifySeriesCounts(0, 3) + + // Make series1 stale by appending a stale sample. Now we should have 1 stale series. + appendSample(series1, 200, math.Float64frombits(value.StaleNaN)) + verifySeriesCounts(1, 3) + + // Make series2 stale as well. + appendSample(series2, 200, math.Float64frombits(value.StaleNaN)) + verifySeriesCounts(2, 3) + + // Add a non-stale sample to series1. It should not be counted as stale now. + appendSample(series1, 300, 10) + verifySeriesCounts(1, 3) + + // Test that series3 doesn't become stale when we add another normal sample. 
+ appendSample(series3, 200, 10) + verifySeriesCounts(1, 3) + + // Test histogram stale samples as well. + series4 := labels.FromStrings("name", "series4", "type", "histogram") + h := tsdbutil.GenerateTestHistograms(1)[0] + appendHistogram(series4, 100, h) + verifySeriesCounts(1, 4) + + // Make histogram series stale. + staleHist := h.Copy() + staleHist.Sum = math.Float64frombits(value.StaleNaN) + appendHistogram(series4, 200, staleHist) + verifySeriesCounts(2, 4) + + // Test float histogram stale samples. + series5 := labels.FromStrings("name", "series5", "type", "float_histogram") + fh := tsdbutil.GenerateTestFloatHistograms(1)[0] + appendFloatHistogram(series5, 100, fh) + verifySeriesCounts(2, 5) + + // Make float histogram series stale. + staleFH := fh.Copy() + staleFH.Sum = math.Float64frombits(value.StaleNaN) + appendFloatHistogram(series5, 200, staleFH) + verifySeriesCounts(3, 5) + + // Make histogram sample non-stale and stale back again. + appendHistogram(series4, 210, h) + verifySeriesCounts(2, 5) + appendHistogram(series4, 220, staleHist) + verifySeriesCounts(3, 5) + + // Make float histogram sample non-stale and stale back again. + appendFloatHistogram(series5, 210, fh) + verifySeriesCounts(2, 5) + appendFloatHistogram(series5, 220, staleFH) + verifySeriesCounts(3, 5) + + // Series 1 and 3 are not stale at this point. Add a new sample to series 1 and series 5, + // so after the GC and removing series 2, 3, 4, we should be left with 1 stale and 1 non-stale series. + appendSample(series1, 400, 10) + appendFloatHistogram(series5, 400, staleFH) + verifySeriesCounts(3, 5) + + // Test garbage collection behavior - stale series should be decremented when GC'd. + // Force a garbage collection by truncating old data. + require.NoError(t, head.Truncate(300)) + + // After truncation, run GC to collect old chunks/series. + head.gc() + + // series 1 and series 5 are left. 
+ verifySeriesCounts(1, 2) + + // Test creating a new series for each of float, histogram, float histogram that starts as stale. + // This should be counted as stale. + series6 := labels.FromStrings("name", "series6", "direct", "stale") + series7 := labels.FromStrings("name", "series7", "direct", "stale") + series8 := labels.FromStrings("name", "series8", "direct", "stale") + appendSample(series6, 400, math.Float64frombits(value.StaleNaN)) + verifySeriesCounts(2, 3) + appendHistogram(series7, 400, staleHist) + verifySeriesCounts(3, 4) + appendFloatHistogram(series8, 400, staleFH) + verifySeriesCounts(4, 5) +} From c3789ff54766cb178d2472ab9be94394532b9d29 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 6 Aug 2025 16:09:21 -0700 Subject: [PATCH 09/89] Restore stale series count on WAL replay Signed-off-by: Ganesh Vernekar --- tsdb/head_test.go | 19 ++++++++++++++++++- tsdb/head_wal.go | 27 ++++++++++++++++++++++++++- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index b1856c0419..f64a93c7f3 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6901,6 +6901,20 @@ func TestHead_NumStaleSeries(t *testing.T) { require.Equal(t, uint64(numSeries), head.NumSeries()) } + restartHeadAndVerifySeriesCounts := func(numStaleSeries, numSeries int) { + verifySeriesCounts(numStaleSeries, numSeries) + + require.NoError(t, head.Close()) + + wal, err := wlog.NewSize(nil, nil, filepath.Join(head.opts.ChunkDirRoot, "wal"), 32768, compression.None) + require.NoError(t, err) + head, err = NewHead(nil, nil, wal, nil, head.opts, nil) + require.NoError(t, err) + require.NoError(t, head.Init(0)) + + verifySeriesCounts(numStaleSeries, numSeries) + } + // Create some series with normal samples. series1 := labels.FromStrings("name", "series1", "label", "value1") series2 := labels.FromStrings("name", "series2", "label", "value2") @@ -6920,10 +6934,12 @@ func TestHead_NumStaleSeries(t *testing.T) { // Make series2 stale as well. 
appendSample(series2, 200, math.Float64frombits(value.StaleNaN)) verifySeriesCounts(2, 3) + restartHeadAndVerifySeriesCounts(2, 3) // Add a non-stale sample to series1. It should not be counted as stale now. appendSample(series1, 300, 10) verifySeriesCounts(1, 3) + restartHeadAndVerifySeriesCounts(1, 3) // Test that series3 doesn't become stale when we add another normal sample. appendSample(series3, 200, 10) @@ -6946,6 +6962,7 @@ func TestHead_NumStaleSeries(t *testing.T) { fh := tsdbutil.GenerateTestFloatHistograms(1)[0] appendFloatHistogram(series5, 100, fh) verifySeriesCounts(2, 5) + restartHeadAndVerifySeriesCounts(2, 5) // Make float histogram series stale. staleFH := fh.Copy() @@ -6969,7 +6986,7 @@ func TestHead_NumStaleSeries(t *testing.T) { // so after the GC and removing series 2, 3, 4, we should be left with 1 stale and 1 non-stale series. appendSample(series1, 400, 10) appendFloatHistogram(series5, 400, staleFH) - verifySeriesCounts(3, 5) + restartHeadAndVerifySeriesCounts(3, 5) // Test garbage collection behavior - stale series should be decremented when GC'd. // Force a garbage collection by truncating old data. 
diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index ee6557fdad..41317bbb92 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -16,6 +16,7 @@ package tsdb import ( "errors" "fmt" + "github.com/prometheus/prometheus/model/value" "maps" "math" "os" @@ -627,6 +628,14 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.T <= ms.mmMaxTime { continue } + + if !value.IsStaleNaN(ms.lastValue) && value.IsStaleNaN(s.V) { + h.numStaleSeries.Inc() + } + if value.IsStaleNaN(ms.lastValue) && !value.IsStaleNaN(s.V) { + h.numStaleSeries.Dec() + } + if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() @@ -657,12 +666,28 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.t <= ms.mmMaxTime { continue } - var chunkCreated bool + var chunkCreated, newlyStale, staleToNonStale bool if s.h != nil { + newlyStale = value.IsStaleNaN(s.h.Sum) + if ms.lastHistogramValue != nil { + newlyStale = newlyStale && !value.IsStaleNaN(ms.lastHistogramValue.Sum) + staleToNonStale = value.IsStaleNaN(ms.lastHistogramValue.Sum) && !value.IsStaleNaN(s.h.Sum) + } _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts) } else { + newlyStale = value.IsStaleNaN(s.fh.Sum) + if ms.lastFloatHistogramValue != nil { + newlyStale = newlyStale && !value.IsStaleNaN(ms.lastFloatHistogramValue.Sum) + staleToNonStale = value.IsStaleNaN(ms.lastFloatHistogramValue.Sum) && !value.IsStaleNaN(s.fh.Sum) + } _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts) } + if newlyStale { + h.numStaleSeries.Inc() + } + if staleToNonStale { + h.numStaleSeries.Dec() + } if chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() From b98cc631a28adfa465798a9c10ce17145d97685e Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 6 Aug 2025 16:22:58 -0700 Subject: [PATCH 10/89] Restore stale series count from chunk snapshots Signed-off-by: 
Ganesh Vernekar --- tsdb/head_test.go | 4 ++++ tsdb/head_wal.go | 8 +++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index f64a93c7f3..3c711e19a0 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6988,6 +6988,10 @@ func TestHead_NumStaleSeries(t *testing.T) { appendFloatHistogram(series5, 400, staleFH) restartHeadAndVerifySeriesCounts(3, 5) + // This will test restarting with snapshot. + head.opts.EnableMemorySnapshotOnShutdown = true + restartHeadAndVerifySeriesCounts(3, 5) + // Test garbage collection behavior - stale series should be decremented when GC'd. // Force a garbage collection by truncating old data. require.NoError(t, head.Truncate(300)) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 41317bbb92..3e0dadb526 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -16,7 +16,6 @@ package tsdb import ( "errors" "fmt" - "github.com/prometheus/prometheus/model/value" "maps" "math" "os" @@ -33,6 +32,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -1607,6 +1607,12 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie series.lastHistogramValue = csr.lastHistogramValue series.lastFloatHistogramValue = csr.lastFloatHistogramValue + if value.IsStaleNaN(series.lastValue) || + (series.lastHistogramValue != nil && value.IsStaleNaN(series.lastHistogramValue.Sum)) || + (series.lastFloatHistogramValue != nil && value.IsStaleNaN(series.lastFloatHistogramValue.Sum)) { + h.numStaleSeries.Inc() + } + app, err := series.headChunks.chunk.Appender() if err != nil { errChan <- err From b1802bae0c055280e03dd8d723f0fd15ae379b82 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 
26 Aug 2025 15:47:06 +0200 Subject: [PATCH 11/89] Add changelog entry Add changelog entry to pull in #16925 to v3.6.0. Signed-off-by: SuperQ --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 064d8b2482..cadfe85d92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## main / unreleased +* [ENHANCEMENT] TSDB: Track stale series in the Head block based on stale sample. #16925 + ## 3.6.0-rc.0 / 2025-08-12 * [FEATURE] PromQL: Add `step()`, and `min()` and `max()` on durations, behind feature flag `promql-duration-expr`. #16777 From 739791a2852a6dbe389fa94973afead74b7a8260 Mon Sep 17 00:00:00 2001 From: Duciwuci Date: Tue, 2 Sep 2025 10:01:47 +0200 Subject: [PATCH 12/89] update script for internal and web Signed-off-by: Duciwuci --- scripts/bump_go_version.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/bump_go_version.sh b/scripts/bump_go_version.sh index 52640358d4..679286c908 100755 --- a/scripts/bump_go_version.sh +++ b/scripts/bump_go_version.sh @@ -19,6 +19,8 @@ printf "Current minimum supported version: ${CURRENT_VERSION}\nNew minimum supported version: ${NEW_VERSION}\n" # Update go.mod files go mod edit -go=${NEW_VERSION}.0 go mod edit -go=${NEW_VERSION}.0 documentation/examples/remote_storage/go.mod +go mod edit -go=${NEW_VERSION}.0 web/ui/mantine-ui/src/promql/tools/go.mod +go mod edit -go=${NEW_VERSION}.0 internal/tools/go.mod # Update .promu.yml sed -i "s/version: ${NEW_VERSION}/version: ${LATEST_VERSION}/g" .promu.yml From 070ffd7edb5efe9c6c2756614f072bb5964ab4bb Mon Sep 17 00:00:00 2001 From: Duciwuci Date: Tue, 2 Sep 2025 10:02:39 +0200 Subject: [PATCH 13/89] bump go version across all stages Signed-off-by: Duciwuci --- .github/workflows/ci.yml | 16 ++++++++-------- .promu.yml | 2 +- documentation/examples/remote_storage/go.mod | 2 +- go.mod | 2 +- internal/tools/go.mod | 2 +- scripts/golangci-lint.yml | 2 +- web/ui/mantine-ui/src/promql/tools/go.mod | 2 +- 7 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f64be9366e..defe385667 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: container: # Whenever the Go version is updated here, .promu.yml # should also be updated. - image: quay.io/prometheus/golang-builder:1.24-base + image: quay.io/prometheus/golang-builder:1.25-base steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: @@ -32,7 +32,7 @@ jobs: name: More Go tests runs-on: ubuntu-latest container: - image: quay.io/prometheus/golang-builder:1.24-base + image: quay.io/prometheus/golang-builder:1.25-base steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: @@ -55,7 +55,7 @@ jobs: GOTOOLCHAIN: local container: # The go version in this image should be N-1 wrt test_go. - image: quay.io/prometheus/golang-builder:1.23-base + image: quay.io/prometheus/golang-builder:1.24-base steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: @@ -70,7 +70,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. container: - image: quay.io/prometheus/golang-builder:1.24-base + image: quay.io/prometheus/golang-builder:1.25-base steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -97,7 +97,7 @@ jobs: persist-credentials: false - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: - go-version: 1.24.x + go-version: 1.25.x - run: | $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"} go test $TestTargets -vet=off -v @@ -109,7 +109,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
container: - image: quay.io/prometheus/golang-builder:1.24-base + image: quay.io/prometheus/golang-builder:1.25-base steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: @@ -208,7 +208,7 @@ jobs: uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: cache: false - go-version: 1.24.x + go-version: 1.25.x - name: Run goyacc and check for diff run: make install-goyacc check-generated-parser golangci: @@ -222,7 +222,7 @@ jobs: - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: - go-version: 1.24.x + go-version: 1.25.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' diff --git a/.promu.yml b/.promu.yml index d16bceeed9..d5205b4fdf 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,7 +1,7 @@ go: # Whenever the Go version is updated here, # .github/workflows should also be updated. 
- version: 1.24 + version: 1.25 repository: path: github.com/prometheus/prometheus build: diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index aa95d2faa8..352317dcdf 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.23.0 +go 1.24.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 diff --git a/go.mod b/go.mod index 8ecfe26e8f..92878a1910 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus -go 1.23.0 +go 1.24.0 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 05803fedaf..f5cc5dcf24 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/internal/tools -go 1.23.0 +go 1.24.0 require ( github.com/bufbuild/buf v1.51.0 diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index d5d9ca2eb4..1816a556b2 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -30,7 +30,7 @@ jobs: - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: - go-version: 1.24.x + go-version: 1.25.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod index 0d0b46e547..6983cf4fe6 100644 --- a/web/ui/mantine-ui/src/promql/tools/go.mod +++ b/web/ui/mantine-ui/src/promql/tools/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools -go 1.23.0 +go 1.24.0 require ( github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc From 76a0cce3ac8d6f406c39eca95101bc0cb6c4330f Mon Sep 17 00:00:00 
2001 From: Duciwuci Date: Tue, 2 Sep 2025 14:37:30 +0200 Subject: [PATCH 14/89] adjust windows error message Signed-off-by: Duciwuci --- cmd/promtool/main_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 17acb5ad02..3f7cf5c166 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -298,7 +298,7 @@ func TestCheckConfigSyntax(t *testing.T) { err: "error checking client cert file \"testdata/nonexistent_cert_file.yml\": " + "stat testdata/nonexistent_cert_file.yml: no such file or directory", errWindows: "error checking client cert file \"testdata\\\\nonexistent_cert_file.yml\": " + - "CreateFile testdata\\nonexistent_cert_file.yml: The system cannot find the file specified.", + "GetFileAttributesEx testdata\\nonexistent_cert_file.yml: The system cannot find the file specified.", }, { name: "check with syntax only succeeds with nonexistent credentials file", @@ -314,7 +314,7 @@ func TestCheckConfigSyntax(t *testing.T) { err: "error checking authorization credentials or bearer token file \"/random/file/which/does/not/exist.yml\": " + "stat /random/file/which/does/not/exist.yml: no such file or directory", errWindows: "error checking authorization credentials or bearer token file \"testdata\\\\random\\\\file\\\\which\\\\does\\\\not\\\\exist.yml\": " + - "CreateFile testdata\\random\\file\\which\\does\\not\\exist.yml: The system cannot find the path specified.", + "GetFileAttributesEx testdata\\random\\file\\which\\does\\not\\exist.yml: The system cannot find the path specified.", }, } for _, test := range cases { From 87d7c12563c8e1786bce878bae78237c9315d467 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 2 Sep 2025 22:32:36 +0200 Subject: [PATCH 15/89] promql: Fix trigger for `HistogramStatsIterator` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR #16702 introduced a regression because it was too strict in detecting 
the condition for using the `HistogramStatsIterator`. It essentially required the triggering function to be buried at least one level deep. `histogram_count(sum(rate(native_histogram_series[2m]))` would not trigger anymore, but `1*histogram_count(sum(rate(native_histogram_series[2m]))` would. Ironically, PR #16682 made the performance of the `HistogramStatsIterator` so much worse that _not_ using it was often better, but this has to be addressed in a separate commit. This commit reinstates the previous `HistogramStatsIterator` detection behavior, as PR #16702 intended to keep it. Relevant benchmark changes with this commit (i.e. old is without using `HistogramStatsIterator`, new is with `HistogramStatsIterator`): name old time/op new time/op delta NativeHistograms/histogram_count_with_short_rate_interval-16 802ms ± 3% 837ms ± 3% +4.42% (p=0.008 n=5+5) NativeHistograms/histogram_count_with_long_rate_interval-16 1.22s ± 3% 1.11s ± 1% -9.46% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_short_rate_interval-16 611ms ± 5% 751ms ± 6% +22.87% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_long_rate_interval-16 975ms ± 4% 1131ms ±11% +16.04% (p=0.008 n=5+5) name old alloc/op new alloc/op delta NativeHistograms/histogram_count_with_short_rate_interval-16 222MB ± 0% 531MB ± 0% +139.63% (p=0.008 n=5+5) NativeHistograms/histogram_count_with_long_rate_interval-16 323MB ± 0% 528MB ± 0% +63.81% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_short_rate_interval-16 179MB ± 0% 452MB ± 0% +153.07% (p=0.016 n=4+5) NativeHistogramsCustomBuckets/histogram_count_with_long_rate_interval-16 175MB ± 0% 452MB ± 0% +157.73% (p=0.016 n=4+5) name old allocs/op new allocs/op delta NativeHistograms/histogram_count_with_short_rate_interval-16 4.48M ± 0% 8.95M ± 0% +99.51% (p=0.008 n=5+5) NativeHistograms/histogram_count_with_long_rate_interval-16 5.02M ± 0% 8.84M ± 0% +75.89% (p=0.008 n=5+5) 
NativeHistogramsCustomBuckets/histogram_count_with_short_rate_interval-16 3.00M ± 0% 5.96M ± 0% +98.93% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_long_rate_interval-16 2.89M ± 0% 5.86M ± 0% +102.69% (p=0.016 n=4+5) Signed-off-by: beorn7 --- promql/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 9024df83da..2feb6c3c92 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3891,7 +3891,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) { return nil } - for i := len(path) - 1; i > 0; i-- { // Walk backwards up the path. + for i := len(path) - 1; i >= 0; i-- { // Walk backwards up the path. call, ok := path[i].(*parser.Call) if !ok { continue From 5010bd4bb1ad8964a802ce8e8b1ddd59cdbf2317 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 2 Sep 2025 23:35:49 +0200 Subject: [PATCH 16/89] promql: Optimize `HistogramStatsIterator` by disallowing integer histograms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `HistogramStatsIterator` is only meant to be used within PromQL. PromQL only ever uses float histograms. If `HistogramStatsIterator` is capable of handling integer histograms, it will still be used, for example by the `BufferedSeriesIterator`, which buffers samples and will use an integer `Histogram` for it, if the underlying chunk is an integer histogram chunk (which is common). However, we can simply intercept the `Next` and `Seek` calls and pretend to only ever be able to return float histograms. This has the welcome side effect that we do not have to handle a mix of float and integer histograms in the `HistogramStatsIterator` anymore. With this commit, the `AtHistogram` call has been changed to panic so that we ensure it is never called.
Benchmark differences between this and the previous commit: name old time/op new time/op delta NativeHistograms/histogram_count_with_short_rate_interval-16 837ms ± 3% 616ms ± 2% -26.36% (p=0.008 n=5+5) NativeHistograms/histogram_count_with_long_rate_interval-16 1.11s ± 1% 0.91s ± 3% -17.75% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_short_rate_interval-16 751ms ± 6% 581ms ± 1% -22.63% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_long_rate_interval-16 1.13s ±11% 0.85s ± 2% -24.59% (p=0.008 n=5+5) name old alloc/op new alloc/op delta NativeHistograms/histogram_count_with_short_rate_interval-16 531MB ± 0% 148MB ± 0% -72.08% (p=0.008 n=5+5) NativeHistograms/histogram_count_with_long_rate_interval-16 528MB ± 0% 145MB ± 0% -72.60% (p=0.016 n=5+4) NativeHistogramsCustomBuckets/histogram_count_with_short_rate_interval-16 452MB ± 0% 145MB ± 0% -67.97% (p=0.016 n=5+4) NativeHistogramsCustomBuckets/histogram_count_with_long_rate_interval-16 452MB ± 0% 141MB ± 0% -68.70% (p=0.016 n=5+4) name old allocs/op new allocs/op delta NativeHistograms/histogram_count_with_short_rate_interval-16 8.95M ± 0% 1.60M ± 0% -82.15% (p=0.008 n=5+5) NativeHistograms/histogram_count_with_long_rate_interval-16 8.84M ± 0% 1.49M ± 0% -83.16% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_short_rate_interval-16 5.96M ± 0% 1.57M ± 0% -73.68% (p=0.008 n=5+5) NativeHistogramsCustomBuckets/histogram_count_with_long_rate_interval-16 5.86M ± 0% 1.46M ± 0% -75.05% (p=0.016 n=5+4) Signed-off-by: beorn7 --- promql/histogram_stats_iterator.go | 136 ++++++++---------------- promql/histogram_stats_iterator_test.go | 87 +++++---------- 2 files changed, 72 insertions(+), 151 deletions(-) diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go index f5224825d3..bb8c610964 100644 --- a/promql/histogram_stats_iterator.go +++ b/promql/histogram_stats_iterator.go @@ -19,27 +19,24 @@ import ( 
"github.com/prometheus/prometheus/tsdb/chunkenc" ) -// HistogramStatsIterator is an iterator that returns histogram objects -// which have only their sum and count values populated. The iterator handles -// counter reset detection internally and sets the counter reset hint accordingly -// in each returned histogram object. +// HistogramStatsIterator is an iterator that returns histogram objects that +// have only their sum and count values populated. The iterator handles counter +// reset detection internally and sets the counter reset hint accordingly in +// each returned histogram object. The Next and Seek methods of the iterator +// will never return ValHistogram, but ValFloatHistogram instead. Effectively, +// the iterator enforces conversion of (integer) Histogram to FloatHistogram. +// The AtHistogram method must not be called (and will panic). type HistogramStatsIterator struct { chunkenc.Iterator - currentH *histogram.Histogram - lastH *histogram.Histogram - currentFH *histogram.FloatHistogram lastFH *histogram.FloatHistogram - - currentSeriesRead bool } // NewHistogramStatsIterator creates a new HistogramStatsIterator. func NewHistogramStatsIterator(it chunkenc.Iterator) *HistogramStatsIterator { return &HistogramStatsIterator{ Iterator: it, - currentH: &histogram.Histogram{}, currentFH: &histogram.FloatHistogram{}, } } @@ -48,44 +45,38 @@ func NewHistogramStatsIterator(it chunkenc.Iterator) *HistogramStatsIterator { // objects already allocated where possible. func (hsi *HistogramStatsIterator) Reset(it chunkenc.Iterator) { hsi.Iterator = it - hsi.currentSeriesRead = false + hsi.lastFH = nil } -// AtHistogram returns the next timestamp/histogram pair. The counter reset -// detection is guaranteed to be correct only when the caller does not switch -// between AtHistogram and AtFloatHistogram calls. 
-func (hsi *HistogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { - var t int64 - t, hsi.currentH = hsi.Iterator.AtHistogram(hsi.currentH) - if value.IsStaleNaN(hsi.currentH.Sum) { - h = &histogram.Histogram{Sum: hsi.currentH.Sum} - return t, h +// Next mostly relays to the underlying iterator, but changes a ValHistogram +// return into a ValFloatHistogram return. +func (hsi *HistogramStatsIterator) Next() chunkenc.ValueType { + vt := hsi.Iterator.Next() + if vt == chunkenc.ValHistogram { + return chunkenc.ValFloatHistogram } - - if h == nil { - h = &histogram.Histogram{ - CounterResetHint: hsi.getResetHint(hsi.currentH), - Count: hsi.currentH.Count, - Sum: hsi.currentH.Sum, - } - hsi.setLastH(hsi.currentH) - return t, h - } - - returnValue := histogram.Histogram{ - CounterResetHint: hsi.getResetHint(hsi.currentH), - Count: hsi.currentH.Count, - Sum: hsi.currentH.Sum, - } - returnValue.CopyTo(h) - - hsi.setLastH(hsi.currentH) - return t, h + return vt } -// AtFloatHistogram returns the next timestamp/float histogram pair. The counter -// reset detection is guaranteed to be correct only when the caller does not -// switch between AtHistogram and AtFloatHistogram calls. +// Seek mostly relays to the underlying iterator, but changes a ValHistogram +// return into a ValFloatHistogram return. +func (hsi *HistogramStatsIterator) Seek(t int64) chunkenc.ValueType { + vt := hsi.Iterator.Seek(t) + if vt == chunkenc.ValHistogram { + return chunkenc.ValFloatHistogram + } + return vt +} + +// AtHistogram must never be called. +func (*HistogramStatsIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) { + panic("HistogramStatsIterator.AtHistogram must never be called") +} + +// AtFloatHistogram returns the next timestamp/float histogram pair. The method +// performs a counter reset detection on the fly. 
It will return an explicit +// hint (not UnknownCounterReset) if the previous sample has been accessed with +// the same iterator. func (hsi *HistogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { var t int64 t, hsi.currentFH = hsi.Iterator.AtFloatHistogram(hsi.currentFH) @@ -114,26 +105,12 @@ func (hsi *HistogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram return t, fh } -func (hsi *HistogramStatsIterator) setLastH(h *histogram.Histogram) { - hsi.lastFH = nil - if hsi.lastH == nil { - hsi.lastH = h.Copy() - } else { - h.CopyTo(hsi.lastH) - } - - hsi.currentSeriesRead = true -} - func (hsi *HistogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) { - hsi.lastH = nil if hsi.lastFH == nil { hsi.lastFH = fh.Copy() } else { fh.CopyTo(hsi.lastFH) } - - hsi.currentSeriesRead = true } func (hsi *HistogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint { @@ -141,44 +118,19 @@ func (hsi *HistogramStatsIterator) getFloatResetHint(hint histogram.CounterReset return hint } prevFH := hsi.lastFH - if prevFH == nil || !hsi.currentSeriesRead { - if hsi.lastH == nil || !hsi.currentSeriesRead { - // We don't know if there's a counter reset. - return histogram.UnknownCounterReset - } - prevFH = hsi.lastH.ToFloat(nil) + if prevFH == nil { + // We don't know if there's a counter reset. Note that this + // generally will trigger an explicit counter reset detection by + // the PromQL engine, which in turn isn't as reliable in this + // case because the PromQL engine will not see the buckets. + // However, we can assume that in cases where the counter reset + // detection is relevant, an iteration through the series has + // happened, and therefore we do not end up here in the first + // place. 
+ return histogram.UnknownCounterReset } if hsi.currentFH.DetectReset(prevFH) { return histogram.CounterReset } return histogram.NotCounterReset } - -func (hsi *HistogramStatsIterator) getResetHint(h *histogram.Histogram) histogram.CounterResetHint { - if h.CounterResetHint != histogram.UnknownCounterReset { - return h.CounterResetHint - } - var prevFH *histogram.FloatHistogram - if hsi.lastH == nil || !hsi.currentSeriesRead { - if hsi.lastFH == nil || !hsi.currentSeriesRead { - // We don't know if there's a counter reset. Note that - // this generally will trigger an explicit counter reset - // detection by the PromQL engine, which in turn isn't - // as reliable in this case because the PromQL engine - // will not see the buckets. However, we can assume that - // in cases where the counter reset detection is - // relevant, an iteration through the series has - // happened, and therefore we do not end up here in the - // first place. - return histogram.UnknownCounterReset - } - prevFH = hsi.lastFH - } else { - prevFH = hsi.lastH.ToFloat(nil) - } - fh := h.ToFloat(nil) - if fh.DetectReset(prevFH) { - return histogram.CounterReset - } - return histogram.NotCounterReset -} diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go index 3e3f2dd4b2..b9d37ce3b9 100644 --- a/promql/histogram_stats_iterator_test.go +++ b/promql/histogram_stats_iterator_test.go @@ -114,64 +114,32 @@ func TestHistogramStatsDecoding(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - t.Run("histogram_stats", func(t *testing.T) { - check := func(statsIterator *HistogramStatsIterator) { - decodedStats := make([]*histogram.Histogram, 0) - - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtHistogram(nil) - decodedStats = append(decodedStats, h) - } - - for i := 0; i < len(tc.histograms); i++ { - require.Equalf(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, "mismatch in counter reset hint for 
histogram %d", i) - h := tc.histograms[i] - if value.IsStaleNaN(h.Sum) { - require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) - require.Equal(t, uint64(0), decodedStats[i].Count) - } else { - require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count) - require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum) - } + check := func(statsIterator *HistogramStatsIterator) { + decodedStats := make([]*histogram.FloatHistogram, 0) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtFloatHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint) + fh := tc.histograms[i].ToFloat(nil) + if value.IsStaleNaN(fh.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, float64(0), decodedStats[i].Count) + } else { + require.Equal(t, fh.Count, decodedStats[i].Count) + require.Equal(t, fh.Sum, decodedStats[i].Sum) } } + } - // Check that we get the expected results with a fresh iterator. - statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) - check(statsIterator) + // Check that we get the expected results with a fresh iterator. + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + check(statsIterator) - // Check that we get the same results if we reset and reuse that iterator. 
- statsIterator.Reset(newHistogramSeries(tc.histograms).Iterator(nil)) - check(statsIterator) - }) - t.Run("float_histogram_stats", func(t *testing.T) { - check := func(statsIterator *HistogramStatsIterator) { - decodedStats := make([]*histogram.FloatHistogram, 0) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtFloatHistogram(nil) - decodedStats = append(decodedStats, h) - } - for i := 0; i < len(tc.histograms); i++ { - require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint) - fh := tc.histograms[i].ToFloat(nil) - if value.IsStaleNaN(fh.Sum) { - require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) - require.Equal(t, float64(0), decodedStats[i].Count) - } else { - require.Equal(t, fh.Count, decodedStats[i].Count) - require.Equal(t, fh.Sum, decodedStats[i].Sum) - } - } - } - - // Check that we get the expected results with a fresh iterator. - statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) - check(statsIterator) - - // Check that we get the same results if we reset and reuse that iterator. - statsIterator.Reset(newHistogramSeries(tc.histograms).Iterator(nil)) - check(statsIterator) - }) + // Check that we get the same results if we reset and reuse that iterator. + statsIterator.Reset(newHistogramSeries(tc.histograms).Iterator(nil)) + check(statsIterator) }) } } @@ -193,17 +161,18 @@ func TestHistogramStatsMixedUse(t *testing.T) { histogram.NotCounterReset, histogram.CounterReset, } + // Note that statsIterator always returns float histograms. 
actualHints := make([]histogram.CounterResetHint, 3) typ := statsIterator.Next() - require.Equal(t, chunkenc.ValHistogram, typ) - _, h := statsIterator.AtHistogram(nil) + require.Equal(t, chunkenc.ValFloatHistogram, typ) + _, h := statsIterator.AtFloatHistogram(nil) actualHints[0] = h.CounterResetHint typ = statsIterator.Next() - require.Equal(t, chunkenc.ValHistogram, typ) - _, h = statsIterator.AtHistogram(nil) + require.Equal(t, chunkenc.ValFloatHistogram, typ) + _, h = statsIterator.AtFloatHistogram(nil) actualHints[1] = h.CounterResetHint typ = statsIterator.Next() - require.Equal(t, chunkenc.ValHistogram, typ) + require.Equal(t, chunkenc.ValFloatHistogram, typ) _, fh := statsIterator.AtFloatHistogram(nil) actualHints[2] = fh.CounterResetHint From 0cef66b12a0a0e2d9d84cd8e9781250a68d8290f Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Sep 2025 13:45:24 +0200 Subject: [PATCH 17/89] promql: Forget lastFH in HistogramStatsIterator after Seek After an effective Seek, the lastFH isn't the lastFH anymore, so we should nil it out. In practice, this should only matter in sub-queries, because we are otherwise not interested in a counter reset of the first sample returned after a Seek. Sub-queries, on the other hand, always do their own counter reset detection. (For that, they would prefer to see the whole histogram, so that's another problem for another commit.) Signed-off-by: beorn7 --- promql/histogram_stats_iterator.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go index bb8c610964..408b3cfa6d 100644 --- a/promql/histogram_stats_iterator.go +++ b/promql/histogram_stats_iterator.go @@ -61,6 +61,11 @@ func (hsi *HistogramStatsIterator) Next() chunkenc.ValueType { // Seek mostly relays to the underlying iterator, but changes a ValHistogram // return into a ValFloatHistogram return.
func (hsi *HistogramStatsIterator) Seek(t int64) chunkenc.ValueType { + // If the Seek is going to move the iterator, we have to forget the + // lastFH. + if t > hsi.AtT() { + hsi.lastFH = nil + } vt := hsi.Iterator.Seek(t) if vt == chunkenc.ValHistogram { return chunkenc.ValFloatHistogram From 4e94ee0109ae58b60edb8d667f9c4b56769738db Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Sep 2025 14:16:50 +0200 Subject: [PATCH 18/89] promqltest: Add test to demonstrate sub-query bug with HistogramStatsIterator To fix this, we need to make sure that HistogramStatsIterator is not used with a sub-query in the path. Signed-off-by: beorn7 --- .../promqltest/testdata/native_histograms.test | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 0958b8951e..eb8cfb3432 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1608,6 +1608,24 @@ eval instant at 1m histogram_quantile(0.5, myHistogram2) eval instant at 1m histogram_quantile(0.5, mixedHistogram) expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram" +clear + +# A counter reset only in a bucket. Sub-queries still need to detect +# it via explicit counter reset detection. This test also runs it with +# histogram_count in the expression to make sure that the +# HistogramStatsIterator is not used. (The latter fails to correctly +# do the counter resets because Seek is used with sub-queries. And the +# explicit counter reset detection done with sub-queries cannot access +# the buckets anymore, if HistogramStatsIterator is used.) +load 1m + h{} {{schema:0 count:1 sum:10 buckets:[1]}}+{{}}x20 {{schema:0 count:1 sum:10 buckets:[0 1]}}+{{}}x20 + +# Both evals below should yield the same value for the count. 
+eval instant at 41m histogram_count(increase(h[40m:9m])) + {} 1.4814814814814814 + +eval instant at 41m increase(h[40m:9m]) + {} {{count:1.4814814814814814 sum:14.814814814814813 counter_reset_hint:gauge offset:1 buckets:[1.4814814814814814]}} clear From 0746f388b047e130e6864d145bb18c47483b3ad8 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Sep 2025 14:36:56 +0200 Subject: [PATCH 19/89] promql: Fix HistogramStatsIterator usage for subqueries Signed-off-by: beorn7 --- promql/engine.go | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 2feb6c3c92..91257eae37 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3891,20 +3891,34 @@ func detectHistogramStatsDecoding(expr parser.Expr) { return nil } + pathLoop: for i := len(path) - 1; i >= 0; i-- { // Walk backwards up the path. + if _, ok := path[i].(*parser.SubqueryExpr); ok { + // If we ever see a subquery in the path, we + // will not skip the buckets. We need the + // buckets for correct counter reset detection. + n.SkipHistogramBuckets = false + break pathLoop + } call, ok := path[i].(*parser.Call) if !ok { - continue + continue pathLoop } switch call.Func.Name { case "histogram_count", "histogram_sum", "histogram_avg": + // We allow skipping buckets preliminarily. But + // we will continue through the path to see if + // we find a subquery (or a histogram function) + // further up (the latter wouldn't make sense, + // but no harm in detecting it). n.SkipHistogramBuckets = true case "histogram_quantile", "histogram_fraction": + // If we ever see a function that needs the + // whole histogram, we will not skip the + // buckets. 
n.SkipHistogramBuckets = false - default: - continue + break pathLoop } - break } return errors.New("stop") }) From 913cc8f72b8a4f6ae4beb1d168c16a88ca4705ab Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sat, 6 Sep 2025 13:04:24 +0200 Subject: [PATCH 20/89] Replace gopkg.in/yaml.v2 with go.yaml.in/yaml/v2 (#17151) * Replace gopkg.in/yaml.v2 with go.yaml.in/yaml/v2 * Upgrade to client_golang@v1.23.2 --------- Signed-off-by: Arve Knudsen --- cmd/promtool/main.go | 2 +- cmd/promtool/unittest.go | 2 +- config/config.go | 2 +- config/config_test.go | 2 +- config/reload.go | 2 +- discovery/consul/consul_test.go | 2 +- discovery/discovery_test.go | 2 +- discovery/dns/dns_test.go | 2 +- discovery/file/file.go | 2 +- discovery/moby/docker_test.go | 2 +- discovery/moby/mock_test.go | 2 +- discovery/moby/nodes_test.go | 2 +- discovery/moby/services_test.go | 2 +- discovery/moby/tasks_test.go | 2 +- discovery/ovhcloud/dedicated_server_test.go | 2 +- discovery/ovhcloud/ovhcloud_test.go | 2 +- discovery/ovhcloud/vps_test.go | 2 +- discovery/registry.go | 2 +- discovery/scaleway/instance_test.go | 2 +- discovery/targetgroup/targetgroup_test.go | 2 +- discovery/xds/kuma_test.go | 2 +- documentation/examples/remote_storage/go.mod | 107 +++-- documentation/examples/remote_storage/go.sum | 473 ++++++++++--------- go.mod | 7 +- go.sum | 10 +- model/labels/labels_test.go | 2 +- model/relabel/relabel_test.go | 2 +- notifier/alertmanagerset.go | 2 +- notifier/manager_test.go | 2 +- plugins/generate.go | 2 +- rules/alerting.go | 2 +- rules/manager_test.go | 2 +- rules/recording.go | 2 +- scrape/manager_test.go | 2 +- storage/remote/azuread/azuread_test.go | 2 +- storage/remote/storage.go | 2 +- web/ui/mantine-ui/src/promql/tools/go.sum | 4 +- 37 files changed, 362 insertions(+), 303 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index caf4364a2e..6357e55cc1 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -43,7 +43,7 @@ import ( 
"github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 3fbed5546c..500f46c9ca 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -33,7 +33,7 @@ import ( "github.com/nsf/jsondiff" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" diff --git a/config/config.go b/config/config.go index 192c216290..8e7afc1f2f 100644 --- a/config/config.go +++ b/config/config.go @@ -33,7 +33,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "github.com/prometheus/sigv4" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" diff --git a/config/config_test.go b/config/config_test.go index 971fdb85f5..1f093c7959 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -31,7 +31,7 @@ import ( "github.com/prometheus/common/promslog" "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/aws" diff --git a/config/reload.go b/config/reload.go index cc0cc97158..07a077a6a9 100644 --- a/config/reload.go +++ b/config/reload.go @@ -21,7 +21,7 @@ import ( "path/filepath" promconfig "github.com/prometheus/common/config" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) type ExternalFilesConfig struct { diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index a450cf216f..a6ff4a625e 100644 --- a/discovery/consul/consul_test.go +++ 
b/discovery/consul/consul_test.go @@ -27,7 +27,7 @@ import ( "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/discovery/discovery_test.go b/discovery/discovery_test.go index af327195f2..116095fd62 100644 --- a/discovery/discovery_test.go +++ b/discovery/discovery_test.go @@ -17,7 +17,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) func TestConfigsCustomUnMarshalMarshal(t *testing.T) { diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index a1c161789c..eb37f1a98e 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -26,7 +26,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/discovery/file/file.go b/discovery/file/file.go index dfcc904fbe..e0225891ce 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -33,7 +33,7 @@ import ( "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index 00e6a3e4f3..430669c113 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) diff --git a/discovery/moby/mock_test.go 
b/discovery/moby/mock_test.go index 7ef5cb07c3..2450ca4436 100644 --- a/discovery/moby/mock_test.go +++ b/discovery/moby/mock_test.go @@ -24,7 +24,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/util/strutil" ) diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go index 973b83c4b6..35676a3a8d 100644 --- a/discovery/moby/nodes_test.go +++ b/discovery/moby/nodes_test.go @@ -22,7 +22,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go index 7a966cfeee..af6ce842d1 100644 --- a/discovery/moby/services_test.go +++ b/discovery/moby/services_test.go @@ -22,7 +22,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go index 59d8831c3b..afb19abbee 100644 --- a/discovery/moby/tasks_test.go +++ b/discovery/moby/tasks_test.go @@ -22,7 +22,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index f9dbd6af9c..686fa7ef3f 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) func 
TestOvhcloudDedicatedServerRefresh(t *testing.T) { diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index 84a35af3ad..8f2272b746 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -22,7 +22,7 @@ import ( "github.com/prometheus/common/config" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery" ) diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index d7a2a705c6..051d52e85e 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - yaml "gopkg.in/yaml.v2" + yaml "go.yaml.in/yaml/v2" ) func TestOvhCloudVpsRefresh(t *testing.T) { diff --git a/discovery/registry.go b/discovery/registry.go index 98c956704e..03eb9e98c4 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -23,7 +23,7 @@ import ( "sync" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/discovery/targetgroup" ) diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go index 11ef36d353..b67b858ae0 100644 --- a/discovery/scaleway/instance_test.go +++ b/discovery/scaleway/instance_test.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) var ( diff --git a/discovery/targetgroup/targetgroup_test.go b/discovery/targetgroup/targetgroup_test.go index e0df05ab9a..d68e29644a 100644 --- a/discovery/targetgroup/targetgroup_test.go +++ b/discovery/targetgroup/targetgroup_test.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) 
func TestTargetGroupStrictJSONUnmarshal(t *testing.T) { diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index 23d754c4b7..faf1ab14d2 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -24,9 +24,9 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index aa95d2faa8..1052020d02 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -7,73 +7,117 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v1.0.0 github.com/influxdata/influxdb-client-go/v2 v2.14.0 - github.com/prometheus/client_golang v1.22.0 - github.com/prometheus/common v0.65.0 - github.com/prometheus/prometheus v1.99.0 - github.com/stretchr/testify v1.10.0 + github.com/prometheus/client_golang v1.23.2 + github.com/prometheus/common v0.66.1 + github.com/prometheus/prometheus v0.305.1-0.20250905124657-5c2e43f09c03 + github.com/stretchr/testify v1.11.1 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + cloud.google.com/go/auth v0.16.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/aws/aws-sdk-go v1.55.5 // indirect + github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect + github.com/aws/smithy-go v1.22.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.3.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // 
indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect + github.com/knadh/koanf/maps v0.1.2 // indirect + github.com/knadh/koanf/providers/confmap v1.0.0 // indirect + github.com/knadh/koanf/v2 v2.2.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/oapi-codegen/runtime v1.0.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/sigv4 v0.2.0 // indirect + 
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/collector/pdata v1.31.0 // indirect - go.opentelemetry.io/collector/semconv v0.125.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/collector/component v1.35.0 // indirect + go.opentelemetry.io/collector/confmap v1.35.0 // indirect + go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect + go.opentelemetry.io/collector/consumer v1.35.0 // indirect + go.opentelemetry.io/collector/featuregate v1.35.0 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect + go.opentelemetry.io/collector/pdata v1.35.0 // indirect + go.opentelemetry.io/collector/pipeline v0.129.0 // indirect + go.opentelemetry.io/collector/processor v1.35.0 // indirect + go.opentelemetry.io/collector/semconv v0.128.0 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/log v0.12.2 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.38.0 // indirect - golang.org/x/net v0.40.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // 
indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.25.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/grpc v1.72.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + google.golang.org/api v0.239.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apimachinery v0.32.3 // indirect k8s.io/client-go v0.32.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) exclude ( @@ -87,6 +131,3 @@ exclude ( // Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules. google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 ) - -// TODO(bwplotka): Move to main branch commit or perhaps released version. 
-replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index fd0d676adf..b39bcf6ab5 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,15 +1,25 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod 
h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/Microsoft/go-winio v0.6.1 
h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= @@ -17,50 +27,75 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5 github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.55.5 
h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.37.0 h1:YtCOESR/pN4j5oA7cVHSfOwIcuh/KwHC4DOSXFbv5F0= +github.com/aws/aws-sdk-go-v2 v1.37.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= +github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= +github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 h1:H2iZoqW/v2Jnrh1FnU725Bq6KJ0k2uP63yH+DcY+HUI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0/go.mod h1:L0FqLbwMXHvNC/7crWV1iIxUlOKYZUE8KuTIA+TozAI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 h1:EDped/rNzAhFPhVY0sDGbtD16OKqksfA8OjF/kLEgw8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0/go.mod h1:uUI335jvzpZRPpjYx6ODc/wg1qH+NnoSTK/FwVeK0C0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0 h1:XHE2G+yaDQql32FZt19QmQt4WuisqQJIkMUSCxeCUl8= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0/go.mod h1:t11/j/nH9i6bbsPH9xc04BJOsV2nVPUqrB67/TLDsyM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod 
h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0 h1:QiiCqpKy0prxq+92uWfESzcb7/8Y9JAamcMOzVYLEoM= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0/go.mod h1:ESppxYqXQCpCY+KWl3BdkQjmsQX6zxKP39SnDtRDoU0= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= +github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= 
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k= +github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw= -github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous 
v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o= +github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ= +github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= -github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= +github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= @@ -74,20 +109,10 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod 
h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -95,41 +120,29 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr 
v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= -github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= +github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= +github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= +github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= +github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod 
h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -137,16 +150,22 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= -github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= 
+github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E= +github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= -github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= -github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= +github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg= +github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -163,97 +182,100 @@ github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISH github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 
-github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= -github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= -github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= +github.com/hetznercloud/hcloud-go/v2 v2.21.1 h1:IH3liW8/cCRjfJ4cyqYvw3s1ek+KWP8dl1roa0lD8JM= +github.com/hetznercloud/hcloud-go/v2 v2.21.1/go.mod h1:XOaYycZJ3XKMVWzmqQ24/+1V7ormJHmPdck/kxrNnQA= github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4= github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= -github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/ionos-cloud/sdk-go/v6 v6.3.4 h1:jTvGl4LOF8v8OYoEIBNVwbFoqSGAFqn6vGE7sp7/BqQ= +github.com/ionos-cloud/sdk-go/v6 v6.3.4/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/knadh/koanf/maps v0.1.2 
h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE= +github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A= +github.com/knadh/koanf/v2 v2.2.1 h1:jaleChtw85y3UdBnI0wCqcg1sj1gPoz6D3caGNHtrNE= +github.com/knadh/koanf/v2 v2.2.1/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= -github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= 
+github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg= +github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= -github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= +github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= +github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec 
v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo= github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 
+github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s= +github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= -github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE= +github.com/ovh/go-ovh v1.9.0/go.mod 
h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= @@ -261,53 +283,45 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= -github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= -github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc= -github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod 
h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/prometheus v0.305.1-0.20250905124657-5c2e43f09c03 h1:NIVtqQm7NTsUcxfjdHuVE7pw3GVjEgwL6a9ADLSj+Wg= +github.com/prometheus/prometheus v0.305.1-0.20250905124657-5c2e43f09c03/go.mod h1:9D9CfSEbKg087QXXz2ev+G1SoB6MqQE0ll4jCmrgCe0= +github.com/prometheus/sigv4 v0.2.0 h1:qDFKnHYFswJxdzGeRP63c4HlH3Vbn1Yf/Ao2zabtVXk= +github.com/prometheus/sigv4 v0.2.0/go.mod h1:D04rqmAaPPEUkjRQxGqjoxdyJuyCh6E0M18fZr0zBiE= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stackitcloud/stackit-sdk-go/core v0.17.2 h1:jPyn+i8rkp2hM80+hOg0B/1EVRbMt778Tr5RWyK1m2E= +github.com/stackitcloud/stackit-sdk-go/core v0.17.2/go.mod h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -318,125 +332,132 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4Isv+BLTBqU= -go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk= -go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s= -go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= 
-go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/collector/component v1.35.0 h1:JpvBukEcEUvJ/TInF1KYpXtWEP+C7iYkxCHKjI0o7BQ= +go.opentelemetry.io/collector/component v1.35.0/go.mod h1:hU/ieWPxWbMAacODCSqem5ZaN6QH9W5GWiZ3MtXVuwc= +go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw= +go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI= +go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI= +go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM= +go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ= +go.opentelemetry.io/collector/confmap v1.35.0/go.mod h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0= +go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk= +go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw= +go.opentelemetry.io/collector/consumer v1.35.0 h1:mgS42yh1maXBIE65IT4//iOA89BE+7xSUzV8czyevHg= +go.opentelemetry.io/collector/consumer v1.35.0/go.mod h1:9sSPX0hDHaHqzR2uSmfLOuFK9v3e9K3HRQ+fydAjOWs= +go.opentelemetry.io/collector/consumer/consumertest v0.129.0 h1:kRmrAgVvPxH5c/rTaOYAzyy0YrrYhQpBNkuqtDRrgeU= +go.opentelemetry.io/collector/consumer/consumertest 
v0.129.0/go.mod h1:JgJKms1+v/CuAjkPH+ceTnKeDgUUGTQV4snGu5wTEHY= +go.opentelemetry.io/collector/consumer/xconsumer v0.129.0 h1:bRyJ9TGWwnrUnB5oQGTjPhxpVRbkIVeugmvks22bJ4A= +go.opentelemetry.io/collector/consumer/xconsumer v0.129.0/go.mod h1:pbe5ZyPJrtzdt/RRI0LqfT1GVBiJLbtkDKx3SBRTiTY= +go.opentelemetry.io/collector/featuregate v1.35.0 h1:c/XRtA35odgxVc4VgOF/PTIk7ajw1wYdQ6QI562gzd4= +go.opentelemetry.io/collector/featuregate v1.35.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= +go.opentelemetry.io/collector/internal/telemetry v0.129.0 h1:jkzRpIyMxMGdAzVOcBe8aRNrbP7eUrMq6cxEHe0sbzA= +go.opentelemetry.io/collector/internal/telemetry v0.129.0/go.mod h1:riAPlR2LZBV7VEx4LicOKebg3N1Ja3izzkv5fl1Lhiw= +go.opentelemetry.io/collector/pdata v1.35.0 h1:ck6WO6hCNjepADY/p9sT9/rLECTLO5ukYTumKzsqB/E= +go.opentelemetry.io/collector/pdata v1.35.0/go.mod h1:pttpb089864qG1k0DMeXLgwwTFLk+o3fAW9I6MF9tzw= +go.opentelemetry.io/collector/pdata/pprofile v0.129.0 h1:DgZTvjOGmyZRx7Or80hz8XbEaGwHPkIh2SX1A5eXttQ= +go.opentelemetry.io/collector/pdata/pprofile v0.129.0/go.mod h1:uUBZxqJNOk6QIMvbx30qom//uD4hXJ1K/l3qysijMLE= +go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY= +go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY= +go.opentelemetry.io/collector/pipeline v0.129.0 h1:Mp7RuKLizLQJ0381eJqKQ0zpgkFlhTE9cHidpJQIvMU= +go.opentelemetry.io/collector/pipeline v0.129.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= +go.opentelemetry.io/collector/processor v1.35.0 h1:YOfHemhhodYn4BnPjN7kWYYDhzPVqRkyHCaQ8mAlavs= +go.opentelemetry.io/collector/processor v1.35.0/go.mod h1:cWHDOpmpAaVNCc9K9j2/okZoLIuP/EpGGRNhM4JGmFM= +go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY= +go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc= 
+go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc= +go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00= +go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4= +go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= +go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw= +go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 h1:lREC4C0ilyP4WibDhQ7Gg2ygAQFP8oR07Fst/5cafwI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0/go.mod h1:HfvuU0kW9HewH14VCOLImqKvUgONodURG7Alj/IrnGI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= +go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= +go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989 h1:4JF7oY9CcHrPGfBLijDcXZyCzGckVEyOjuat5ktmQRg= +go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989/go.mod h1:NToOxLDCS1tXDSB2dIj44H9xGPOpKr0csIN+gnuihv4= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 
h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a 
h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net 
v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.32.0 
h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= 
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= -google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo= +google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= @@ -445,12 +466,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/go.mod b/go.mod index 825a5affb4..79d2715e1b 100644 --- a/go.mod +++ b/go.mod @@ -54,9 +54,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 github.com/ovh/go-ovh v1.9.0 github.com/prometheus/alertmanager v0.28.1 - github.com/prometheus/client_golang v1.23.0-rc.1 + github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.66.0 + github.com/prometheus/common v0.66.1 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/exporter-toolkit v0.14.0 github.com/prometheus/sigv4 v0.2.0 @@ -83,6 +83,7 @@ require ( go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 + 
go.yaml.in/yaml/v2 v2.4.2 golang.org/x/oauth2 v0.30.0 golang.org/x/sync v0.16.0 golang.org/x/sys v0.35.0 @@ -91,7 +92,6 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 google.golang.org/grpc v1.73.0 google.golang.org/protobuf v1.36.8 - gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 @@ -120,6 +120,7 @@ require ( go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect go.opentelemetry.io/otel/log v0.12.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect ) require ( diff --git a/go.sum b/go.sum index 217a6877df..6b3cbd5aa7 100644 --- a/go.sum +++ b/go.sum @@ -443,8 +443,8 @@ github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.23.0-rc.1 h1:Is/nGODd8OsJlNQSybeYBwY/B6aHrN7+QwVUYutHSgw= -github.com/prometheus/client_golang v1.23.0-rc.1/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -452,8 +452,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw 
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.66.0 h1:K/rJPHrG3+AoQs50r2+0t7zMnMzek2Vbv31OFVsMeVY= -github.com/prometheus/common v0.66.0/go.mod h1:Ux6NtV1B4LatamKE63tJBntoxD++xmtI/lK0VtEplN4= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= @@ -594,6 +594,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 8850278a21..80194c5068 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/common/model" 
"github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) var ( diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index c3af43e53a..79730055cb 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -20,7 +20,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/util/testutil" diff --git a/notifier/alertmanagerset.go b/notifier/alertmanagerset.go index 50471098ad..c47c9ea23a 100644 --- a/notifier/alertmanagerset.go +++ b/notifier/alertmanagerset.go @@ -22,7 +22,7 @@ import ( config_util "github.com/prometheus/common/config" "github.com/prometheus/sigv4" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/notifier/manager_test.go b/notifier/manager_test.go index 09ca49e5c7..4aed352e04 100644 --- a/notifier/manager_test.go +++ b/notifier/manager_test.go @@ -32,7 +32,7 @@ import ( "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" diff --git a/plugins/generate.go b/plugins/generate.go index 09a1515a18..2c4ba410f2 100644 --- a/plugins/generate.go +++ b/plugins/generate.go @@ -22,7 +22,7 @@ import ( "path" "path/filepath" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) //go:generate go run generate.go diff --git a/rules/alerting.go b/rules/alerting.go index 00e457843e..9a6ff0a113 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/common/model" "go.uber.org/atomic" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" diff 
--git a/rules/manager_test.go b/rules/manager_test.go index a63ac647e2..76d802f336 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -35,7 +35,7 @@ import ( "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" diff --git a/rules/recording.go b/rules/recording.go index 3b6db210af..2da6885f5b 100644 --- a/rules/recording.go +++ b/rules/recording.go @@ -22,7 +22,7 @@ import ( "time" "go.uber.org/atomic" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" diff --git a/scrape/manager_test.go b/scrape/manager_test.go index c0535c6d2b..4e9feb7e17 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -37,8 +37,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/types/known/timestamppb" - "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go index 876c33b288..d581f0218a 100644 --- a/storage/remote/azuread/azuread_test.go +++ b/storage/remote/azuread/azuread_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "gopkg.in/yaml.v2" + "go.yaml.in/yaml/v2" ) const ( diff --git a/storage/remote/storage.go b/storage/remote/storage.go index 6067b33037..648c91c955 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" - "gopkg.in/yaml.v2" + 
"go.yaml.in/yaml/v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" diff --git a/web/ui/mantine-ui/src/promql/tools/go.sum b/web/ui/mantine-ui/src/promql/tools/go.sum index 10e26a9c03..e7ed7cec79 100644 --- a/web/ui/mantine-ui/src/promql/tools/go.sum +++ b/web/ui/mantine-ui/src/promql/tools/go.sum @@ -92,8 +92,8 @@ golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +go.yaml.in/yaml/v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +go.yaml.in/yaml/v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= From 979aea1d49f3b3470a26ae354002cf1a4b00c83c Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 8 Sep 2025 14:34:25 +0200 Subject: [PATCH 21/89] OTLP to directly write to an interface which can hide storage details (#16951) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * OTLP writer writes directly to appender Do not convert to Remote-Write 1.0 protocol. Convert to TSDB Appender interface instead. For downstream projects that still convert OTLP to something else (e.g. Mimir using its own RW 1.0+2.0 compatible protocol), introduce a compatibility layer between OTLP decoding and TSDB Appender. This is the CombinedAppender that hides the implementation. Name is subject to change. 
--------- Signed-off-by: David Ashpole Signed-off-by: György Krajcsovits Signed-off-by: George Krajcsovits Co-authored-by: David Ashpole Co-authored-by: Jesus Vazquez Co-authored-by: Arve Knudsen --- .../combined_appender.go | 234 +++++ .../combined_appender_test.go | 767 +++++++++++++++ .../prometheusremotewrite/helper.go | 483 ++++------ .../prometheusremotewrite/helper_test.go | 896 +++++++----------- .../prometheusremotewrite/histograms.go | 127 ++- .../prometheusremotewrite/histograms_test.go | 653 +++++++------ .../prometheusremotewrite/metrics_to_prw.go | 150 ++- .../metrics_to_prw_test.go | 524 +++++----- .../number_data_points.go | 74 +- .../number_data_points_test.go | 234 ++--- .../otlp_to_openmetrics_metadata.go | 25 +- .../prometheusremotewrite/timeseries.go | 46 - storage/remote/write_handler.go | 46 +- storage/remote/write_handler_test.go | 52 +- storage/remote/write_test.go | 676 +++++++++++-- web/api/v1/api.go | 1 + 16 files changed, 3077 insertions(+), 1911 deletions(-) create mode 100644 storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go create mode 100644 storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go delete mode 100644 storage/remote/otlptranslator/prometheusremotewrite/timeseries.go diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go new file mode 100644 index 0000000000..de2c65962d --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go @@ -0,0 +1,234 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(krajorama): rename this package to otlpappender or similar, as it is +// not specific to Prometheus remote write anymore. +// Note otlptranslator is already used by prometheus/otlptranslator repo. +package prometheusremotewrite + +import ( + "errors" + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage" +) + +// Metadata extends metadata.Metadata with the metric family name. +// OTLP calculates the metric family name for all metrics and uses +// it for generating summary, histogram series by adding the magic +// suffixes. The metric family name is passed down to the appender +// in case the storage needs it for metadata updates. +// Known user is Mimir that implements /api/v1/metadata and uses +// Remote-Write 1.0 for this. Might be removed later if no longer +// needed by any downstream project. +type Metadata struct { + metadata.Metadata + MetricFamilyName string +} + +// CombinedAppender is similar to storage.Appender, but combines updates to +// metadata, created timestamps, exemplars and samples into a single call. +type CombinedAppender interface { + // AppendSample appends a sample and related exemplars, metadata, and + // created timestamp to the storage. 
+ AppendSample(ls labels.Labels, meta Metadata, ct, t int64, v float64, es []exemplar.Exemplar) error + // AppendHistogram appends a histogram and related exemplars, metadata, and + // created timestamp to the storage. + AppendHistogram(ls labels.Labels, meta Metadata, ct, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error +} + +// CombinedAppenderMetrics is for the metrics observed by the +// combinedAppender implementation. +type CombinedAppenderMetrics struct { + samplesAppendedWithoutMetadata prometheus.Counter + outOfOrderExemplars prometheus.Counter +} + +func NewCombinedAppenderMetrics(reg prometheus.Registerer) CombinedAppenderMetrics { + return CombinedAppenderMetrics{ + samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "otlp_appended_samples_without_metadata_total", + Help: "The total number of samples ingested from OTLP without corresponding metadata.", + }), + outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "otlp_out_of_order_exemplars_total", + Help: "The total number of received OTLP exemplars which were rejected because they were out of order.", + }), + } +} + +// NewCombinedAppender creates a combined appender that sets start times and +// updates metadata for each series only once, and appends samples and +// exemplars for each call. 
+func NewCombinedAppender(app storage.Appender, logger *slog.Logger, ingestCTZeroSample bool, metrics CombinedAppenderMetrics) CombinedAppender { + return &combinedAppender{ + app: app, + logger: logger, + ingestCTZeroSample: ingestCTZeroSample, + refs: make(map[uint64]seriesRef), + samplesAppendedWithoutMetadata: metrics.samplesAppendedWithoutMetadata, + outOfOrderExemplars: metrics.outOfOrderExemplars, + } +} + +type seriesRef struct { + ref storage.SeriesRef + ct int64 + ls labels.Labels + meta metadata.Metadata +} + +type combinedAppender struct { + app storage.Appender + logger *slog.Logger + samplesAppendedWithoutMetadata prometheus.Counter + outOfOrderExemplars prometheus.Counter + ingestCTZeroSample bool + // Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs. + // To detect hash collision it also stores the labels. + // There is no overflow/conflict list, the TSDB will handle that part. + refs map[uint64]seriesRef +} + +func (b *combinedAppender) AppendSample(ls labels.Labels, meta Metadata, ct, t int64, v float64, es []exemplar.Exemplar) (err error) { + return b.appendFloatOrHistogram(ls, meta.Metadata, ct, t, v, nil, es) +} + +func (b *combinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, ct, t int64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) { + if h == nil { + // Sanity check, we should never get here with a nil histogram. 
+ b.logger.Error("Received nil histogram in CombinedAppender.AppendHistogram", "series", ls.String()) + return errors.New("internal error, attempted to append nil histogram") + } + return b.appendFloatOrHistogram(ls, meta.Metadata, ct, t, 0, h, es) +} + +func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadata.Metadata, ct, t int64, v float64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) { + hash := ls.Hash() + series, exists := b.refs[hash] + ref := series.ref + if exists && !labels.Equal(series.ls, ls) { + // Hash collision. The series reference we stored is pointing to a + // different series so we cannot use it, we need to reset the + // reference and cache. + // Note: we don't need to keep track of conflicts here, + // the TSDB will handle that part when we pass 0 reference. + exists = false + ref = 0 + } + updateRefs := !exists || series.ct != ct + if updateRefs && ct != 0 && b.ingestCTZeroSample { + var newRef storage.SeriesRef + if h != nil { + newRef, err = b.app.AppendHistogramCTZeroSample(ref, ls, t, ct, h, nil) + } else { + newRef, err = b.app.AppendCTZeroSample(ref, ls, t, ct) + } + if err != nil { + if !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. + b.logger.Warn("Error when appending CT from OTLP", "err", err, "series", ls.String(), "created_timestamp", ct, "timestamp", t, "sample_type", sampleType(h)) + } + } else { + // We only use the returned reference on success as otherwise an + // error of CT append could invalidate the series reference. 
+ ref = newRef + } + } + { + var newRef storage.SeriesRef + if h != nil { + newRef, err = b.app.AppendHistogram(ref, ls, t, h, nil) + } else { + newRef, err = b.app.Append(ref, ls, t, v) + } + if err != nil { + // Although Append does not currently return ErrDuplicateSampleForTimestamp there is + // a note indicating its inclusion in the future. + if errors.Is(err, storage.ErrOutOfOrderSample) || + errors.Is(err, storage.ErrOutOfBounds) || + errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { + b.logger.Error("Error when appending sample from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t, "sample_type", sampleType(h)) + } + } else { + // If the append was successful, we can use the returned reference. + ref = newRef + } + } + + if ref == 0 { + // We cannot update metadata or add exemplars on non existent series. + return + } + + if !exists || series.meta.Help != meta.Help || series.meta.Type != meta.Type || series.meta.Unit != meta.Unit { + updateRefs = true + // If this is the first time we see this series, set the metadata. 
+ _, err := b.app.UpdateMetadata(ref, ls, meta) + if err != nil { + b.samplesAppendedWithoutMetadata.Add(1) + b.logger.Warn("Error while updating metadata from OTLP", "err", err) + } + } + + if updateRefs { + b.refs[hash] = seriesRef{ + ref: ref, + ct: ct, + ls: ls, + meta: meta, + } + } + + b.appendExemplars(ref, ls, es) + + return +} + +func sampleType(h *histogram.Histogram) string { + if h == nil { + return "float" + } + return "histogram" +} + +func (b *combinedAppender) appendExemplars(ref storage.SeriesRef, ls labels.Labels, es []exemplar.Exemplar) storage.SeriesRef { + var err error + for _, e := range es { + if ref, err = b.app.AppendExemplar(ref, ls, e); err != nil { + switch { + case errors.Is(err, storage.ErrOutOfOrderExemplar): + b.outOfOrderExemplars.Add(1) + b.logger.Debug("Out of order exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + default: + // Since exemplar storage is still experimental, we don't fail the request on ingestion errors + b.logger.Debug("Error while adding exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + } + } + } + return ref +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go new file mode 100644 index 0000000000..5652d0b74a --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go @@ -0,0 +1,767 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheusremotewrite + +import ( + "context" + "errors" + "math" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/util/testutil" +) + +type mockCombinedAppender struct { + pendingSamples []combinedSample + pendingHistograms []combinedHistogram + + samples []combinedSample + histograms []combinedHistogram +} + +type combinedSample struct { + metricFamilyName string + ls labels.Labels + meta metadata.Metadata + t int64 + ct int64 + v float64 + es []exemplar.Exemplar +} + +type combinedHistogram struct { + metricFamilyName string + ls labels.Labels + meta metadata.Metadata + t int64 + ct int64 + h *histogram.Histogram + es []exemplar.Exemplar +} + +func (m *mockCombinedAppender) AppendSample(ls labels.Labels, meta Metadata, ct, t int64, v float64, es []exemplar.Exemplar) error { + m.pendingSamples = append(m.pendingSamples, combinedSample{ + metricFamilyName: meta.MetricFamilyName, + ls: ls, + meta: meta.Metadata, + t: t, + ct: ct, + v: v, + es: es, + }) + return nil +} + +func (m *mockCombinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, ct, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error { + m.pendingHistograms = append(m.pendingHistograms, combinedHistogram{ + metricFamilyName: meta.MetricFamilyName, + ls: ls, + meta: 
meta.Metadata, + t: t, + ct: ct, + h: h, + es: es, + }) + return nil +} + +func (m *mockCombinedAppender) Commit() error { + m.samples = append(m.samples, m.pendingSamples...) + m.pendingSamples = m.pendingSamples[:0] + m.histograms = append(m.histograms, m.pendingHistograms...) + m.pendingHistograms = m.pendingHistograms[:0] + return nil +} + +func requireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) { + testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...) +} + +// TestCombinedAppenderOnTSDB runs some basic tests on a real TSDB to check +// that the combinedAppender works on a real TSDB. +func TestCombinedAppenderOnTSDB(t *testing.T) { + t.Run("ingestCTZeroSample=false", func(t *testing.T) { testCombinedAppenderOnTSDB(t, false) }) + + t.Run("ingestCTZeroSample=true", func(t *testing.T) { testCombinedAppenderOnTSDB(t, true) }) +} + +func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) { + t.Helper() + + now := time.Now() + + testExemplars := []exemplar.Exemplar{ + { + Labels: labels.FromStrings("tracid", "122"), + Value: 1337, + }, + { + Labels: labels.FromStrings("tracid", "132"), + Value: 7777, + }, + } + expectedExemplars := []exemplar.QueryResult{ + { + SeriesLabels: labels.FromStrings( + model.MetricNameLabel, "test_bytes_total", + "foo", "bar", + ), + Exemplars: testExemplars, + }, + } + + seriesLabels := labels.FromStrings( + model.MetricNameLabel, "test_bytes_total", + "foo", "bar", + ) + floatMetadata := Metadata{ + Metadata: metadata.Metadata{ + Type: model.MetricTypeCounter, + Unit: "bytes", + Help: "some help", + }, + MetricFamilyName: "test_bytes_total", + } + + histogramMetadata := Metadata{ + Metadata: metadata.Metadata{ + Type: model.MetricTypeHistogram, + Unit: "bytes", + Help: "some help", + }, + MetricFamilyName: "test_bytes", + } + + testCases := map[string]struct { + appendFunc func(*testing.T, 
CombinedAppender) + expectedSamples []sample + expectedExemplars []exemplar.QueryResult + }{ + "single float sample, zero CT": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, testExemplars)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + f: 42.0, + }, + }, + expectedExemplars: expectedExemplars, + }, + "single float sample, very old CT": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 1, now.UnixMilli(), 42.0, nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + f: 42.0, + }, + }, + }, + "single float sample, normal CT": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil)) + }, + expectedSamples: []sample{ + { + ctZero: true, + t: now.Add(-2 * time.Minute).UnixMilli(), + }, + { + t: now.UnixMilli(), + f: 42.0, + }, + }, + }, + "single float sample, CT same time as sample": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + f: 42.0, + }, + }, + }, + "single float sample, CT in the future of the sample": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + f: 42.0, + }, + }, + }, + "single histogram sample, zero CT": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), 
testExemplars)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + h: tsdbutil.GenerateTestHistogram(42), + }, + }, + expectedExemplars: expectedExemplars, + }, + "single histogram sample, very old CT": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 1, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + h: tsdbutil.GenerateTestHistogram(42), + }, + }, + }, + "single histogram sample, normal CT": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) + }, + expectedSamples: []sample{ + { + ctZero: true, + t: now.Add(-2 * time.Minute).UnixMilli(), + h: &histogram.Histogram{}, + }, + { + t: now.UnixMilli(), + h: tsdbutil.GenerateTestHistogram(42), + }, + }, + }, + "single histogram sample, CT same time as sample": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + h: tsdbutil.GenerateTestHistogram(42), + }, + }, + }, + "single histogram sample, CT in the future of the sample": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + h: tsdbutil.GenerateTestHistogram(42), + }, + }, + }, + "multiple float samples": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 
42.0, nil)) + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.Add(15*time.Second).UnixMilli(), 62.0, nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + f: 42.0, + }, + { + t: now.Add(15 * time.Second).UnixMilli(), + f: 62.0, + }, + }, + }, + "multiple histogram samples": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil)) + require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.Add(15*time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(62), nil)) + }, + expectedSamples: []sample{ + { + t: now.UnixMilli(), + h: tsdbutil.GenerateTestHistogram(42), + }, + { + t: now.Add(15 * time.Second).UnixMilli(), + h: tsdbutil.GenerateTestHistogram(62), + }, + }, + }, + "float samples with CT changing": { + appendFunc: func(t *testing.T, app CombinedAppender) { + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-4*time.Second).UnixMilli(), now.Add(-3*time.Second).UnixMilli(), 42.0, nil)) + require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-1*time.Second).UnixMilli(), now.UnixMilli(), 62.0, nil)) + }, + expectedSamples: []sample{ + { + ctZero: true, + t: now.Add(-4 * time.Second).UnixMilli(), + }, + { + t: now.Add(-3 * time.Second).UnixMilli(), + f: 42.0, + }, + { + ctZero: true, + t: now.Add(-1 * time.Second).UnixMilli(), + }, + { + t: now.UnixMilli(), + f: 62.0, + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + dir := t.TempDir() + opts := tsdb.DefaultOptions() + opts.EnableExemplarStorage = true + opts.MaxExemplars = 100 + opts.EnableNativeHistograms = true + db, err := tsdb.Open(dir, promslog.NewNopLogger(), prometheus.NewRegistry(), opts, nil) + require.NoError(t, err) + + t.Cleanup(func() { db.Close() }) + + ctx := context.Background() + reg 
:= prometheus.NewRegistry() + app := db.Appender(ctx) + capp := NewCombinedAppender(app, promslog.NewNopLogger(), ingestCTZeroSample, NewCombinedAppenderMetrics(reg)) + + tc.appendFunc(t, capp) + + require.NoError(t, app.Commit()) + + q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64)) + require.NoError(t, err) + + ss := q.Select(ctx, false, &storage.SelectHints{ + Start: int64(math.MinInt64), + End: int64(math.MaxInt64), + }, labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total")) + + require.NoError(t, ss.Err()) + + require.True(t, ss.Next()) + series := ss.At() + it := series.Iterator(nil) + for i, sample := range tc.expectedSamples { + if !ingestCTZeroSample && sample.ctZero { + continue + } + if sample.h == nil { + require.Equal(t, chunkenc.ValFloat, it.Next()) + ts, v := it.At() + require.Equal(t, sample.t, ts, "sample ts %d", i) + require.Equal(t, sample.f, v, "sample v %d", i) + } else { + require.Equal(t, chunkenc.ValHistogram, it.Next()) + ts, h := it.AtHistogram(nil) + require.Equal(t, sample.t, ts, "sample ts %d", i) + require.Equal(t, sample.h.Count, h.Count, "sample v %d", i) + } + } + require.False(t, ss.Next()) + + eq, err := db.ExemplarQuerier(ctx) + require.NoError(t, err) + exResult, err := eq.Select(int64(math.MinInt64), int64(math.MaxInt64), []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total")}) + require.NoError(t, err) + if tc.expectedExemplars == nil { + tc.expectedExemplars = []exemplar.QueryResult{} + } + require.Equal(t, tc.expectedExemplars, exResult) + }) + } +} + +type sample struct { + ctZero bool + + t int64 + f float64 + h *histogram.Histogram +} + +// TestCombinedAppenderSeriesRefs checks that the combined appender +// correctly uses and updates the series references in the internal map. 
+func TestCombinedAppenderSeriesRefs(t *testing.T) { + seriesLabels := labels.FromStrings( + model.MetricNameLabel, "test_bytes_total", + "foo", "bar", + ) + + floatMetadata := Metadata{ + Metadata: metadata.Metadata{ + Type: model.MetricTypeCounter, + Unit: "bytes", + Help: "some help", + }, + MetricFamilyName: "test_bytes_total", + } + + t.Run("happy case with CT zero, reference is passed and reused", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) + + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{ + { + Labels: labels.FromStrings("tracid", "122"), + Value: 1337, + }, + })) + + require.Len(t, app.records, 6) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[0]) + ref := app.records[0].outRef + require.NotZero(t, ref) + requireEqualOpAndRef(t, "Append", ref, app.records[1]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) + requireEqualOpAndRef(t, "AppendCTZeroSample", ref, app.records[3]) + requireEqualOpAndRef(t, "Append", ref, app.records[4]) + requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[5]) + }) + + t.Run("error on second CT ingest doesn't update the reference", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) + + app.appendCTZeroSampleError = errors.New("test error") + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, nil)) + + require.Len(t, app.records, 5) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[0]) + ref := app.records[0].outRef + require.NotZero(t, ref) + 
requireEqualOpAndRef(t, "Append", ref, app.records[1]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) + requireEqualOpAndRef(t, "AppendCTZeroSample", ref, app.records[3]) + require.Zero(t, app.records[3].outRef, "the second AppendCTZeroSample returned 0") + requireEqualOpAndRef(t, "Append", ref, app.records[4]) + }) + + t.Run("updateMetadata called when meta help changes", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + newMetadata := floatMetadata + newMetadata.Help = "some other help" + + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, nil)) + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 5, 162.0, nil)) + + require.Len(t, app.records, 7) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[0]) + ref := app.records[0].outRef + require.NotZero(t, ref) + requireEqualOpAndRef(t, "Append", ref, app.records[1]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) + requireEqualOpAndRef(t, "AppendCTZeroSample", ref, app.records[3]) + requireEqualOpAndRef(t, "Append", ref, app.records[4]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5]) + requireEqualOpAndRef(t, "Append", ref, app.records[6]) + }) + + t.Run("updateMetadata called when meta unit changes", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + newMetadata := floatMetadata + newMetadata.Unit = "seconds" + + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, nil)) + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 
3, 5, 162.0, nil)) + + require.Len(t, app.records, 7) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[0]) + ref := app.records[0].outRef + require.NotZero(t, ref) + requireEqualOpAndRef(t, "Append", ref, app.records[1]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) + requireEqualOpAndRef(t, "AppendCTZeroSample", ref, app.records[3]) + requireEqualOpAndRef(t, "Append", ref, app.records[4]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5]) + requireEqualOpAndRef(t, "Append", ref, app.records[6]) + }) + + t.Run("updateMetadata called when meta type changes", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + newMetadata := floatMetadata + newMetadata.Type = model.MetricTypeGauge + + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, nil)) + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 5, 162.0, nil)) + + require.Len(t, app.records, 7) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[0]) + ref := app.records[0].outRef + require.NotZero(t, ref) + requireEqualOpAndRef(t, "Append", ref, app.records[1]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) + requireEqualOpAndRef(t, "AppendCTZeroSample", ref, app.records[3]) + requireEqualOpAndRef(t, "Append", ref, app.records[4]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5]) + requireEqualOpAndRef(t, "Append", ref, app.records[6]) + }) + + t.Run("metadata, exemplars are not updated if append failed", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + app.appendError = errors.New("test error") + require.Error(t, 
capp.AppendSample(seriesLabels.Copy(), floatMetadata, 0, 1, 42.0, []exemplar.Exemplar{ + { + Labels: labels.FromStrings("tracid", "122"), + Value: 1337, + }, + })) + + require.Len(t, app.records, 1) + require.Equal(t, appenderRecord{ + op: "Append", + ls: labels.FromStrings(model.MetricNameLabel, "test_bytes_total", "foo", "bar"), + }, app.records[0]) + }) + + t.Run("metadata, exemplars are updated if append failed but reference is valid", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + newMetadata := floatMetadata + newMetadata.Help = "some other help" + + require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil)) + app.appendError = errors.New("test error") + require.Error(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, []exemplar.Exemplar{ + { + Labels: labels.FromStrings("tracid", "122"), + Value: 1337, + }, + })) + + require.Len(t, app.records, 7) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[0]) + ref := app.records[0].outRef + require.NotZero(t, ref) + requireEqualOpAndRef(t, "Append", ref, app.records[1]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) + requireEqualOpAndRef(t, "AppendCTZeroSample", ref, app.records[3]) + requireEqualOpAndRef(t, "Append", ref, app.records[4]) + require.Zero(t, app.records[4].outRef, "the second Append returned 0") + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5]) + requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[6]) + }) + + t.Run("simulate conflict with existing series", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + ls := labels.FromStrings( + model.MetricNameLabel, "test_bytes_total", + "foo", "bar", + ) + + require.NoError(t, capp.AppendSample(ls, floatMetadata, 
1, 2, 42.0, nil)) + + hash := ls.Hash() + cappImpl := capp.(*combinedAppender) + series := cappImpl.refs[hash] + series.ls = labels.FromStrings( + model.MetricNameLabel, "test_bytes_total", + "foo", "club", + ) + // The hash and ref remain the same, but we altered the labels. + // This simulates a conflict with an existing series. + cappImpl.refs[hash] = series + + require.NoError(t, capp.AppendSample(ls, floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{ + { + Labels: labels.FromStrings("tracid", "122"), + Value: 1337, + }, + })) + + require.Len(t, app.records, 7) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[0]) + ref := app.records[0].outRef + require.NotZero(t, ref) + requireEqualOpAndRef(t, "Append", ref, app.records[1]) + requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2]) + requireEqualOpAndRef(t, "AppendCTZeroSample", 0, app.records[3]) + newRef := app.records[3].outRef + require.NotEqual(t, ref, newRef, "the second AppendCTZeroSample returned a different reference") + requireEqualOpAndRef(t, "Append", newRef, app.records[4]) + requireEqualOpAndRef(t, "UpdateMetadata", newRef, app.records[5]) + requireEqualOpAndRef(t, "AppendExemplar", newRef, app.records[6]) + }) + + t.Run("check that invoking AppendHistogram returns an error for nil histogram", func(t *testing.T) { + app := &appenderRecorder{} + capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, NewCombinedAppenderMetrics(prometheus.NewRegistry())) + + ls := labels.FromStrings( + model.MetricNameLabel, "test_bytes_total", + "foo", "bar", + ) + err := capp.AppendHistogram(ls, Metadata{}, 4, 2, nil, nil) + require.Error(t, err) + }) +} + +func requireEqualOpAndRef(t *testing.T, expectedOp string, expectedRef storage.SeriesRef, actual appenderRecord) { + t.Helper() + require.Equal(t, expectedOp, actual.op) + require.Equal(t, expectedRef, actual.ref) +} + +type appenderRecord struct { + op string + ref storage.SeriesRef + outRef storage.SeriesRef + ls labels.Labels +} + 
+type appenderRecorder struct { + refcount uint64 + records []appenderRecord + + appendError error + appendCTZeroSampleError error + appendHistogramError error + appendHistogramCTZeroSampleError error + updateMetadataError error + appendExemplarError error +} + +var _ storage.Appender = &appenderRecorder{} + +func (a *appenderRecorder) setOutRef(ref storage.SeriesRef) { + if len(a.records) == 0 { + return + } + a.records[len(a.records)-1].outRef = ref +} + +func (a *appenderRecorder) newRef() storage.SeriesRef { + a.refcount++ + return storage.SeriesRef(a.refcount) +} + +func (a *appenderRecorder) Append(ref storage.SeriesRef, ls labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { + a.records = append(a.records, appenderRecord{op: "Append", ref: ref, ls: ls}) + if a.appendError != nil { + return 0, a.appendError + } + if ref == 0 { + ref = a.newRef() + } + a.setOutRef(ref) + return ref, nil +} + +func (a *appenderRecorder) AppendCTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64) (storage.SeriesRef, error) { + a.records = append(a.records, appenderRecord{op: "AppendCTZeroSample", ref: ref, ls: ls}) + if a.appendCTZeroSampleError != nil { + return 0, a.appendCTZeroSampleError + } + if ref == 0 { + ref = a.newRef() + } + a.setOutRef(ref) + return ref, nil +} + +func (a *appenderRecorder) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.records = append(a.records, appenderRecord{op: "AppendHistogram", ref: ref, ls: ls}) + if a.appendHistogramError != nil { + return 0, a.appendHistogramError + } + if ref == 0 { + ref = a.newRef() + } + a.setOutRef(ref) + return ref, nil +} + +func (a *appenderRecorder) AppendHistogramCTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.records = append(a.records, appenderRecord{op: "AppendHistogramCTZeroSample", 
ref: ref, ls: ls}) + if a.appendHistogramCTZeroSampleError != nil { + return 0, a.appendHistogramCTZeroSampleError + } + if ref == 0 { + ref = a.newRef() + } + a.setOutRef(ref) + return ref, nil +} + +func (a *appenderRecorder) UpdateMetadata(ref storage.SeriesRef, ls labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + a.records = append(a.records, appenderRecord{op: "UpdateMetadata", ref: ref, ls: ls}) + if a.updateMetadataError != nil { + return 0, a.updateMetadataError + } + a.setOutRef(ref) + return ref, nil +} + +func (a *appenderRecorder) AppendExemplar(ref storage.SeriesRef, ls labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { + a.records = append(a.records, appenderRecord{op: "AppendExemplar", ref: ref, ls: ls}) + if a.appendExemplarError != nil { + return 0, a.appendExemplarError + } + a.setOutRef(ref) + return ref, nil +} + +func (a *appenderRecorder) Commit() error { + a.records = append(a.records, appenderRecord{op: "Commit"}) + return nil +} + +func (a *appenderRecorder) Rollback() error { + a.records = append(a.records, appenderRecord{op: "Rollback"}) + return nil +} + +func (*appenderRecorder) SetOptions(_ *storage.AppendOptions) { + panic("not implemented") +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index b7445b5d67..4c50099e06 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -23,22 +23,22 @@ import ( "log" "math" "slices" - "sort" "strconv" "strings" "time" "unicode/utf8" - "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" 
+ "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" ) const ( @@ -56,132 +56,71 @@ const ( // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification%2Fmetrics%2Fdatamodel.md#exemplars-2 traceIDKey = "trace_id" spanIDKey = "span_id" - infoType = "info" targetMetricName = "target_info" defaultLookbackDelta = 5 * time.Minute ) -type bucketBoundsData struct { - ts *prompb.TimeSeries - bound float64 -} - -// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds. -type byBucketBoundsData []bucketBoundsData - -func (m byBucketBoundsData) Len() int { return len(m) } -func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound } -func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -// ByLabelName enables the usage of sort.Sort() with a slice of labels. -type ByLabelName []prompb.Label - -func (a ByLabelName) Len() int { return len(a) } -func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name } -func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// timeSeriesSignature returns a hashed label set signature. -// The label slice should not contain duplicate label names; this method sorts the slice by label name before creating -// the signature. -// The algorithm is the same as in Prometheus' labels.StableHash function. -func timeSeriesSignature(labels []prompb.Label) uint64 { - sort.Sort(ByLabelName(labels)) - - // Use xxhash.Sum64(b) for fast path as it's faster. - b := make([]byte, 0, 1024) - for i, v := range labels { - if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { - // If labels entry is 1KB+ do not allocate whole entry. 
- h := xxhash.New() - _, _ = h.Write(b) - for _, v := range labels[i:] { - _, _ = h.WriteString(v.Name) - _, _ = h.Write(seps) - _, _ = h.WriteString(v.Value) - _, _ = h.Write(seps) - } - return h.Sum64() - } - - b = append(b, v.Name...) - b = append(b, seps[0]) - b = append(b, v.Value...) - b = append(b, seps[0]) - } - return xxhash.Sum64(b) -} - -var seps = []byte{'\xff'} - // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. -func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings, - ignoreAttrs []string, logOnOverwrite bool, metadata prompb.MetricMetadata, extras ...string, -) ([]prompb.Label, error) { +func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings, + ignoreAttrs []string, logOnOverwrite bool, meta Metadata, extras ...string, +) (labels.Labels, error) { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) - promotedAttrs := settings.PromoteResourceAttributes.promotedAttributes(resourceAttrs) - promoteScope := settings.PromoteScopeMetadata && scope.name != "" - scopeLabelCount := 0 - if promoteScope { - // Include name, version and schema URL. - scopeLabelCount = scope.attributes.Len() + 3 - } - - // Calculate the maximum possible number of labels we could return so we can preallocate l. 
- maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + scopeLabelCount + len(extras)/2 - - if haveServiceName { - maxLabelCount++ - } - if haveInstanceID { - maxLabelCount++ - } - if settings.EnableTypeAndUnitLabels { - maxLabelCount += 2 - } // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. - labels := make([]prompb.Label, 0, maxLabelCount) + c.scratchBuilder.Reset() + // XXX: Should we always drop service namespace/service name/service instance ID from the labels // (as they get mapped to other Prometheus labels)? attributes.Range(func(key string, value pcommon.Value) bool { if !slices.Contains(ignoreAttrs, key) { - labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) + c.scratchBuilder.Add(key, value.AsString()) } return true }) - sort.Stable(ByLabelName(labels)) + c.scratchBuilder.Sort() + sortedLabels := c.scratchBuilder.Labels() - // map ensures no duplicate label names. - l := make(map[string]string, maxLabelCount) labelNamer := otlptranslator.LabelNamer{UTF8Allowed: settings.AllowUTF8} - for _, label := range labels { - finalKey, err := labelNamer.Build(label.Name) - if err != nil { - return nil, err - } - if existingValue, alreadyExists := l[finalKey]; alreadyExists { - l[finalKey] = existingValue + ";" + label.Value - } else { - l[finalKey] = label.Value + + if settings.AllowUTF8 { + // UTF8 is allowed, so conflicts aren't possible. + c.builder.Reset(sortedLabels) + } else { + // Now that we have sorted and filtered the labels, build the actual list + // of labels, and handle conflicts by appending values. 
+ c.builder.Reset(labels.EmptyLabels()) + var sortErr error + sortedLabels.Range(func(l labels.Label) { + if sortErr != nil { + return + } + finalKey, err := labelNamer.Build(l.Name) + if err != nil { + sortErr = err + return + } + if existingValue := c.builder.Get(finalKey); existingValue != "" { + c.builder.Set(finalKey, existingValue+";"+l.Value) + } else { + c.builder.Set(finalKey, l.Value) + } + }) + if sortErr != nil { + return labels.EmptyLabels(), sortErr } } - for _, lbl := range promotedAttrs { - normalized, err := labelNamer.Build(lbl.Name) - if err != nil { - return nil, err - } - if _, exists := l[normalized]; !exists { - l[normalized] = lbl.Value - } + err := settings.PromoteResourceAttributes.addPromotedAttributes(c.builder, resourceAttrs, settings.AllowUTF8) + if err != nil { + return labels.EmptyLabels(), err } if promoteScope { var rangeErr error @@ -191,25 +130,25 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s rangeErr = err return false } - l[name] = v.AsString() + c.builder.Set(name, v.AsString()) return true }) if rangeErr != nil { - return nil, rangeErr + return labels.EmptyLabels(), rangeErr } // Scope Name, Version and Schema URL are added after attributes to ensure they are not overwritten by attributes. 
- l["otel_scope_name"] = scope.name - l["otel_scope_version"] = scope.version - l["otel_scope_schema_url"] = scope.schemaURL + c.builder.Set("otel_scope_name", scope.name) + c.builder.Set("otel_scope_version", scope.version) + c.builder.Set("otel_scope_schema_url", scope.schemaURL) } if settings.EnableTypeAndUnitLabels { unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8} - if metadata.Type != prompb.MetricMetadata_UNKNOWN { - l["__type__"] = strings.ToLower(metadata.Type.String()) + if meta.Type != model.MetricTypeUnknown { + c.builder.Set(model.MetricTypeLabel, strings.ToLower(string(meta.Type))) } - if metadata.Unit != "" { - l["__unit__"] = unitNamer.Build(metadata.Unit) + if meta.Unit != "" { + c.builder.Set(model.MetricUnitLabel, unitNamer.Build(meta.Unit)) } } @@ -219,19 +158,19 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok { val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) } - l[model.JobLabel] = val + c.builder.Set(model.JobLabel, val) } // Map service.instance.id to instance. if haveInstanceID { - l[model.InstanceLabel] = instance.AsString() + c.builder.Set(model.InstanceLabel, instance.AsString()) } for key, value := range settings.ExternalLabels { // External labels have already been sanitized. - if _, alreadyExists := l[key]; alreadyExists { + if existingValue := c.builder.Get(key); existingValue != "" { // Skip external labels if they are overridden by metric attributes. continue } - l[key] = value + c.builder.Set(key, value) } for i := 0; i < len(extras); i += 2 { @@ -240,8 +179,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s } name := extras[i] - _, found := l[name] - if found && logOnOverwrite { + if existingValue := c.builder.Get(name); existingValue != "" && logOnOverwrite { log.Println("label " + name + " is overwritten. 
Check if Prometheus reserved labels are used.") } // internal labels should be maintained. @@ -249,18 +187,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s var err error name, err = labelNamer.Build(name) if err != nil { - return nil, err + return labels.EmptyLabels(), err } } - l[name] = extras[i+1] + c.builder.Set(name, extras[i+1]) } - labels = labels[:0] - for k, v := range l { - labels = append(labels, prompb.Label{Name: k, Value: v}) - } - - return labels, nil + return c.builder.Labels(), nil } func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporality, bool, error) { @@ -286,7 +219,7 @@ func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporali // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope, + resource pcommon.Resource, settings Settings, scope scope, meta Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -295,44 +228,48 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels, err := createAttributes(resource, pt.Attributes(), scope, settings, nil, false, metadata) + startTimestamp := convertTimeStamp(pt.StartTimestamp()) + baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta) if err != nil { return err } + baseName := meta.MetricFamilyName + // If the sum is unset, it indicates the _sum metric point should be // omitted if pt.HasSum() { // treat sum as a sample in an individual TimeSeries - sum := &prompb.Sample{ - Value: 
pt.Sum(), - Timestamp: timestamp, - } + val := pt.Sum() if pt.Flags().NoRecordedValue() { - sum.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } - sumlabels := createLabels(metadata.MetricFamilyName+sumStr, baseLabels) - c.addSample(sum, sumlabels) + sumlabels := c.addLabels(baseName+sumStr, baseLabels) + if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil { + return err + } } // treat count as a sample in an individual TimeSeries - count := &prompb.Sample{ - Value: float64(pt.Count()), - Timestamp: timestamp, - } + val := float64(pt.Count()) if pt.Flags().NoRecordedValue() { - count.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } - countlabels := createLabels(metadata.MetricFamilyName+countStr, baseLabels) - c.addSample(count, countlabels) + countlabels := c.addLabels(baseName+countStr, baseLabels) + if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil { + return err + } + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) + if err != nil { + return err + } + nextExemplarIdx := 0 // cumulative count for conversion to cumulative histogram var cumulativeCount uint64 - var bucketBounds []bucketBoundsData - // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1 for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -341,33 +278,35 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo bound := pt.ExplicitBounds().At(i) cumulativeCount += pt.BucketCounts().At(i) - bucket := &prompb.Sample{ - Value: float64(cumulativeCount), - Timestamp: timestamp, + + // Find exemplars that belong to this bucket. Both exemplars and + // buckets are sorted in ascending order. 
+ var currentBucketExemplars []exemplar.Exemplar + for ; nextExemplarIdx < len(exemplars); nextExemplarIdx++ { + ex := exemplars[nextExemplarIdx] + if ex.Value > bound { + // This exemplar belongs in a higher bucket. + break + } + currentBucketExemplars = append(currentBucketExemplars, ex) } + val := float64(cumulativeCount) if pt.Flags().NoRecordedValue() { - bucket.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } boundStr := strconv.FormatFloat(bound, 'f', -1, 64) - labels := createLabels(metadata.MetricFamilyName+bucketStr, baseLabels, leStr, boundStr) - ts := c.addSample(bucket, labels) - - bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound}) + labels := c.addLabels(baseName+bucketStr, baseLabels, leStr, boundStr) + if err := c.appender.AppendSample(labels, meta, startTimestamp, timestamp, val, currentBucketExemplars); err != nil { + return err + } } // add le=+Inf bucket - infBucket := &prompb.Sample{ - Timestamp: timestamp, - } + val = float64(pt.Count()) if pt.Flags().NoRecordedValue() { - infBucket.Value = math.Float64frombits(value.StaleNaN) - } else { - infBucket.Value = float64(pt.Count()) + val = math.Float64frombits(value.StaleNaN) } - infLabels := createLabels(metadata.MetricFamilyName+bucketStr, baseLabels, leStr, pInfStr) - ts := c.addSample(infBucket, infLabels) - - bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)}) - if err := c.addExemplars(ctx, pt, bucketBounds); err != nil { + infLabels := c.addLabels(baseName+bucketStr, baseLabels, leStr, pInfStr) + if err := c.appender.AppendSample(infLabels, meta, startTimestamp, timestamp, val, exemplars[nextExemplarIdx:]); err != nil { return err } } @@ -375,76 +314,65 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo return nil } -type exemplarType interface { - pmetric.ExponentialHistogramDataPoint | pmetric.HistogramDataPoint | pmetric.NumberDataPoint - Exemplars() 
pmetric.ExemplarSlice -} - -func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, pt T) ([]prompb.Exemplar, error) { - promExemplars := make([]prompb.Exemplar, 0, pt.Exemplars().Len()) - for i := 0; i < pt.Exemplars().Len(); i++ { - if err := everyN.checkContext(ctx); err != nil { +func (c *PrometheusConverter) getPromExemplars(ctx context.Context, exemplars pmetric.ExemplarSlice) ([]exemplar.Exemplar, error) { + if exemplars.Len() == 0 { + return nil, nil + } + outputExemplars := make([]exemplar.Exemplar, 0, exemplars.Len()) + for i := 0; i < exemplars.Len(); i++ { + if err := c.everyN.checkContext(ctx); err != nil { return nil, err } - exemplar := pt.Exemplars().At(i) + ex := exemplars.At(i) exemplarRunes := 0 - promExemplar := prompb.Exemplar{ - Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), + ts := timestamp.FromTime(ex.Timestamp().AsTime()) + newExemplar := exemplar.Exemplar{ + Ts: ts, + HasTs: ts != 0, } - switch exemplar.ValueType() { + c.scratchBuilder.Reset() + switch ex.ValueType() { case pmetric.ExemplarValueTypeInt: - promExemplar.Value = float64(exemplar.IntValue()) + newExemplar.Value = float64(ex.IntValue()) case pmetric.ExemplarValueTypeDouble: - promExemplar.Value = exemplar.DoubleValue() + newExemplar.Value = ex.DoubleValue() default: - return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + return nil, fmt.Errorf("unsupported exemplar value type: %v", ex.ValueType()) } - if traceID := exemplar.TraceID(); !traceID.IsEmpty() { + if traceID := ex.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) - promLabel := prompb.Label{ - Name: traceIDKey, - Value: val, - } - promExemplar.Labels = append(promExemplar.Labels, promLabel) + c.scratchBuilder.Add(traceIDKey, val) } - if spanID := exemplar.SpanID(); !spanID.IsEmpty() { + if spanID := ex.SpanID(); !spanID.IsEmpty() { val := 
hex.EncodeToString(spanID[:]) exemplarRunes += utf8.RuneCountInString(spanIDKey) + utf8.RuneCountInString(val) - promLabel := prompb.Label{ - Name: spanIDKey, - Value: val, - } - promExemplar.Labels = append(promExemplar.Labels, promLabel) + c.scratchBuilder.Add(spanIDKey, val) } - attrs := exemplar.FilteredAttributes() - labelsFromAttributes := make([]prompb.Label, 0, attrs.Len()) + attrs := ex.FilteredAttributes() attrs.Range(func(key string, value pcommon.Value) bool { - val := value.AsString() - exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(val) - promLabel := prompb.Label{ - Name: key, - Value: val, - } - - labelsFromAttributes = append(labelsFromAttributes, promLabel) - + exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(value.AsString()) return true }) - if exemplarRunes <= maxExemplarRunes { - // only append filtered attributes if it does not cause exemplar - // labels to exceed the max number of runes - promExemplar.Labels = append(promExemplar.Labels, labelsFromAttributes...) - } - promExemplars = append(promExemplars, promExemplar) + // Only append filtered attributes if it does not cause exemplar + // labels to exceed the max number of runes. 
+ if exemplarRunes <= maxExemplarRunes { + attrs.Range(func(key string, value pcommon.Value) bool { + c.scratchBuilder.Add(key, value.AsString()) + return true + }) + } + c.scratchBuilder.Sort() + newExemplar.Labels = c.scratchBuilder.Labels() + outputExemplars = append(outputExemplars, newExemplar) } - return promExemplars, nil + return outputExemplars, nil } // findMinAndMaxTimestamps returns the minimum of minTimestamp and the earliest timestamp in metric and @@ -493,7 +421,7 @@ func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp p } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, metadata prompb.MetricMetadata, scope scope, + settings Settings, scope scope, meta Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -502,122 +430,70 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels, err := createAttributes(resource, pt.Attributes(), scope, settings, nil, false, metadata) + startTimestamp := convertTimeStamp(pt.StartTimestamp()) + baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta) if err != nil { return err } + baseName := meta.MetricFamilyName + // treat sum as a sample in an individual TimeSeries - sum := &prompb.Sample{ - Value: pt.Sum(), - Timestamp: timestamp, - } + val := pt.Sum() if pt.Flags().NoRecordedValue() { - sum.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } // sum and count of the summary should append suffix to baseName - sumlabels := createLabels(metadata.MetricFamilyName+sumStr, baseLabels) - c.addSample(sum, sumlabels) + sumlabels := c.addLabels(baseName+sumStr, baseLabels) + if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, 
timestamp, val, nil); err != nil { + return err + } // treat count as a sample in an individual TimeSeries - count := &prompb.Sample{ - Value: float64(pt.Count()), - Timestamp: timestamp, - } + val = float64(pt.Count()) if pt.Flags().NoRecordedValue() { - count.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) + } + countlabels := c.addLabels(baseName+countStr, baseLabels) + if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil { + return err } - countlabels := createLabels(metadata.MetricFamilyName+countStr, baseLabels) - c.addSample(count, countlabels) // process each percentile/quantile for i := 0; i < pt.QuantileValues().Len(); i++ { qt := pt.QuantileValues().At(i) - quantile := &prompb.Sample{ - Value: qt.Value(), - Timestamp: timestamp, - } + val = qt.Value() if pt.Flags().NoRecordedValue() { - quantile.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) } percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) - qtlabels := createLabels(metadata.MetricFamilyName, baseLabels, quantileStr, percentileStr) - c.addSample(quantile, qtlabels) + qtlabels := c.addLabels(baseName, baseLabels, quantileStr, percentileStr) + if err := c.appender.AppendSample(qtlabels, meta, startTimestamp, timestamp, val, nil); err != nil { + return err + } } } return nil } -// createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name. +// addLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name. // If extras are provided, corresponding label pairs are also added to the returned slice. // If extras is uneven length, the last (unpaired) extra will be ignored. 
-func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label { - extraLabelCount := len(extras) / 2 - labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name - copy(labels, baseLabels) +func (c *PrometheusConverter) addLabels(name string, baseLabels labels.Labels, extras ...string) labels.Labels { + c.builder.Reset(baseLabels) n := len(extras) n -= n % 2 for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 { - labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + c.builder.Set(extras[extrasIdx], extras[extrasIdx+1]) } - - labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name}) - return labels -} - -// addTypeAndUnitLabels appends type and unit labels to the given labels slice. -func addTypeAndUnitLabels(labels []prompb.Label, metadata prompb.MetricMetadata, settings Settings) []prompb.Label { - unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8} - - labels = slices.DeleteFunc(labels, func(l prompb.Label) bool { - return l.Name == "__type__" || l.Name == "__unit__" - }) - - labels = append(labels, prompb.Label{Name: "__type__", Value: strings.ToLower(metadata.Type.String())}) - labels = append(labels, prompb.Label{Name: "__unit__", Value: unitNamer.Build(metadata.Unit)}) - - return labels -} - -// getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false. -// Otherwise it creates a new one and returns that, and true. 
-func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) { - h := timeSeriesSignature(lbls) - ts := c.unique[h] - if ts != nil { - if isSameMetric(ts, lbls) { - // We already have this metric - return ts, false - } - - // Look for a matching conflict - for _, cTS := range c.conflicts[h] { - if isSameMetric(cTS, lbls) { - // We already have this metric - return cTS, false - } - } - - // New conflict - ts = &prompb.TimeSeries{ - Labels: lbls, - } - c.conflicts[h] = append(c.conflicts[h], ts) - return ts, true - } - - // This metric is new - ts = &prompb.TimeSeries{ - Labels: lbls, - } - c.unique[h] = ts - return ts, true + c.builder.Set(model.MetricNameLabel, name) + return c.builder.Labels() } // addResourceTargetInfo converts the resource to the target info metric. -func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time, converter *PrometheusConverter) error { +func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time) error { if settings.DisableTargetInfo { return nil } @@ -650,17 +526,24 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies // Do not pass identifying attributes as ignoreAttrs below. identifyingAttrs = nil } - labels, err := createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, prompb.MetricMetadata{}, model.MetricNameLabel, name) + meta := Metadata{ + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Target metadata", + }, + MetricFamilyName: name, + } + // TODO: should target info have the __type__ metadata label? 
+ lbls, err := c.createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, Metadata{}, model.MetricNameLabel, name) if err != nil { return err } haveIdentifier := false - for _, l := range labels { + lbls.Range(func(l labels.Label) { if l.Name == model.JobLabel || l.Name == model.InstanceLabel { haveIdentifier = true - break } - } + }) if !haveIdentifier { // We need at least one identifying label to generate target_info. @@ -675,18 +558,12 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies settings.LookbackDelta = defaultLookbackDelta } interval := settings.LookbackDelta / 2 - ts, _ := converter.getOrCreateTimeSeries(labels) for timestamp := earliestTimestamp; timestamp.Before(latestTimestamp); timestamp = timestamp.Add(interval) { - ts.Samples = append(ts.Samples, prompb.Sample{ - Value: float64(1), - Timestamp: timestamp.UnixMilli(), - }) + if err := c.appender.AppendSample(lbls, meta, 0, timestamp.UnixMilli(), float64(1), nil); err != nil { + return err + } } - ts.Samples = append(ts.Samples, prompb.Sample{ - Value: float64(1), - Timestamp: latestTimestamp.UnixMilli(), - }) - return nil + return c.appender.AppendSample(lbls, meta, 0, latestTimestamp.UnixMilli(), float64(1), nil) } // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms. 
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index cee229a3f7..9ecb2c15f7 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -18,15 +18,19 @@ package prometheusremotewrite import ( "context" + "slices" + "strings" "testing" "time" "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/testutil" ) @@ -71,134 +75,62 @@ func TestCreateAttributes(t *testing.T) { promoteScope bool ignoreResourceAttributes []string ignoreAttrs []string - expectedLabels []prompb.Label + expectedLabels labels.Labels }{ { name: "Successful conversion without resource attribute promotion and without scope promotion", scope: defaultScope, promoteScope: false, promoteResourceAttributes: nil, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + ), }, { name: "Successful conversion without resource attribute promotion and with scope promotion", scope: defaultScope, promoteScope: true, promoteResourceAttributes: nil, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: 
"instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion without resource attribute promotion and with scope promotion, but without scope", scope: scope{}, promoteResourceAttributes: nil, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + ), }, { name: "Successful conversion with some attributes ignored", promoteResourceAttributes: nil, ignoreAttrs: []string{"metric-attr-other"}, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: 
"metric_attr", - Value: "metric value", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + ), }, { name: "Successful conversion with some attributes ignored and with scope promotion", @@ -206,260 +138,95 @@ func TestCreateAttributes(t *testing.T) { promoteScope: true, promoteResourceAttributes: nil, ignoreAttrs: []string{"metric-attr-other"}, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion", scope: defaultScope, promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"}, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: 
"otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "existent_attr", "resource value", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion, conflicting resource attributes are ignored", scope: defaultScope, promoteScope: true, promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"}, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + 
"otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion with resource attribute promotion and with scope promotion, attributes are only promoted once", scope: defaultScope, promoteScope: true, promoteResourceAttributes: []string{"existent-attr", "existent-attr"}, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion promoting all resource attributes and with scope promotion", scope: defaultScope, promoteAllResourceAttributes: true, promoteScope: true, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource 
value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "service_name", - Value: "service name", - }, - { - Name: "service_instance_id", - Value: "service ID", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "service_name", "service name", + "service_instance_id", "service ID", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, { name: "Successful conversion promoting all resource attributes and with scope promotion, ignoring 'service.instance.id'", @@ -469,60 +236,25 @@ func TestCreateAttributes(t *testing.T) { ignoreResourceAttributes: []string{ "service.instance.id", }, - expectedLabels: []prompb.Label{ - { - Name: "__name__", - Value: "test_metric", - }, - { - Name: "instance", - Value: "service ID", - }, - { - Name: "job", - Value: "service name", - }, - { - Name: "existent_attr", - Value: "resource value", - }, - { - Name: "metric_attr", - Value: "metric value", - }, - { - Name: "metric_attr_other", - Value: "metric value other", - }, - { - Name: "service_name", - Value: "service name", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { 
- Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - }, + expectedLabels: labels.FromStrings( + "__name__", "test_metric", + "instance", "service ID", + "job", "service name", + "existent_attr", "resource value", + "metric_attr", "metric value", + "metric_attr_other", "metric value other", + "service_name", "service name", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + c := NewPrometheusConverter(&mockCombinedAppender{}) settings := Settings{ PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{ PromoteAllResourceAttributes: tc.promoteAllResourceAttributes, @@ -531,10 +263,10 @@ func TestCreateAttributes(t *testing.T) { }), PromoteScopeMetadata: tc.promoteScope, } - lbls, err := createAttributes(resource, attrs, tc.scope, settings, tc.ignoreAttrs, false, prompb.MetricMetadata{}, model.MetricNameLabel, "test_metric") + lbls, err := c.createAttributes(resource, attrs, tc.scope, settings, tc.ignoreAttrs, false, Metadata{}, model.MetricNameLabel, "test_metric") require.NoError(t, err) - require.ElementsMatch(t, lbls, tc.expectedLabels) + testutil.RequireEqual(t, lbls, tc.expectedLabels) }) } } @@ -575,7 +307,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - want func() map[uint64]*prompb.TimeSeries + want func() []combinedSample }{ { name: "summary with start time and without scope promotion", @@ -592,25 +324,25 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - countLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - sumLabels 
:= []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + return []combinedSample{ + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+sumStr, + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+countStr, + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, } }, @@ -630,47 +362,30 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - scopeLabels := []prompb.Label{ - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: "otel_scope_version", - Value: defaultScope.version, - }, + want: func() []combinedSample { + scopeLabels := []string{ + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, } - countLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - }, scopeLabels...) - sumLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - }, scopeLabels...) 
- return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + return []combinedSample{ + { + metricFamilyName: "test_summary", + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_summary"+sumStr)...), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + metricFamilyName: "test_summary", + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_summary"+countStr)...), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, } }, @@ -689,25 +404,85 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - countLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, - } - sumLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + return []combinedSample{ + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+sumStr, + ), + t: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(sumLabels): { - Labels: sumLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+countStr, + ), + t: convertTimeStamp(ts), + v: 0, + }, + } + }, + }, + { + name: "summary without start time and without scope promotion and some quantiles", + metric: func() pmetric.Metric { + metric := 
pmetric.NewMetric() + metric.SetName("test_summary") + metric.SetEmptySummary() + + dp := metric.Summary().DataPoints().AppendEmpty() + dp.SetTimestamp(ts) + dp.SetCount(50) + dp.SetSum(100) + dp.QuantileValues().EnsureCapacity(2) + h := dp.QuantileValues().AppendEmpty() + h.SetQuantile(0.5) + h.SetValue(30) + n := dp.QuantileValues().AppendEmpty() + n.SetQuantile(0.9) + n.SetValue(40) + + return metric + }, + scope: defaultScope, + promoteScope: false, + want: func() []combinedSample { + return []combinedSample{ + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+sumStr, + ), + t: convertTimeStamp(ts), + v: 100, + }, + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary"+countStr, + ), + t: convertTimeStamp(ts), + v: 50, + }, + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary", + quantileStr, "0.5", + ), + t: convertTimeStamp(ts), + v: 30, + }, + { + metricFamilyName: "test_summary", + ls: labels.FromStrings( + model.MetricNameLabel, "test_summary", + quantileStr, "0.9", + ), + t: convertTimeStamp(ts), + v: 40, }, } }, @@ -716,7 +491,8 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) converter.addSummaryDataPoints( context.Background(), @@ -725,12 +501,14 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { Settings{ PromoteScopeMetadata: tt.promoteScope, }, - prompb.MetricMetadata{MetricFamilyName: metric.Name()}, tt.scope, + Metadata{ + MetricFamilyName: metric.Name(), + }, ) + require.NoError(t, mockAppender.Commit()) - testutil.RequireEqual(t, tt.want(), converter.unique) - require.Empty(t, converter.conflicts) + requireEqual(t, tt.want(), 
mockAppender.samples) }) } } @@ -753,7 +531,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - want func() map[uint64]*prompb.TimeSeries + want func() []combinedSample }{ { name: "histogram with start time and without scope promotion", @@ -770,26 +548,26 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - countLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, - } - infLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, - {Name: model.BucketLabel, Value: "+Inf"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + return []combinedSample{ + { + metricFamilyName: "test_hist", + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist"+countStr, + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(infLabels): { - Labels: infLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + metricFamilyName: "test_hist", + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist_bucket", + model.BucketLabel, "+Inf", + ), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, } }, @@ -809,48 +587,31 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - scopeLabels := []prompb.Label{ - { - Name: "otel_scope_attr1", - Value: "value1", - }, - { - Name: "otel_scope_attr2", - Value: "value2", - }, - { - Name: "otel_scope_name", - Value: defaultScope.name, - }, - { - Name: "otel_scope_schema_url", - Value: defaultScope.schemaURL, - }, - { - Name: 
"otel_scope_version", - Value: defaultScope.version, - }, + want: func() []combinedSample { + scopeLabels := []string{ + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, } - countLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, - }, scopeLabels...) - infLabels := append([]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, - {Name: model.BucketLabel, Value: "+Inf"}, - }, scopeLabels...) - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(countLabels): { - Labels: countLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + return []combinedSample{ + { + metricFamilyName: "test_hist", + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_hist"+countStr)...), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(infLabels): { - Labels: infLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + metricFamilyName: "test_hist", + ls: labels.FromStrings(append(scopeLabels, + model.MetricNameLabel, "test_hist_bucket", + model.BucketLabel, "+Inf")...), + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 0, }, } }, @@ -867,26 +628,24 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { return metric }, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, - } - infLabels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, - {Name: model.BucketLabel, Value: "+Inf"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(infLabels): { - Labels: infLabels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + return 
[]combinedSample{ + { + metricFamilyName: "test_hist", + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist"+countStr, + ), + t: convertTimeStamp(ts), + v: 0, }, - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + { + metricFamilyName: "test_hist", + ls: labels.FromStrings( + model.MetricNameLabel, "test_hist_bucket", + model.BucketLabel, "+Inf", + ), + t: convertTimeStamp(ts), + v: 0, }, } }, @@ -895,7 +654,8 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) converter.addHistogramDataPoints( context.Background(), @@ -904,47 +664,49 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { Settings{ PromoteScopeMetadata: tt.promoteScope, }, - prompb.MetricMetadata{MetricFamilyName: metric.Name()}, tt.scope, + Metadata{ + MetricFamilyName: metric.Name(), + }, ) + require.NoError(t, mockAppender.Commit()) - require.Equal(t, tt.want(), converter.unique) - require.Empty(t, converter.conflicts) + requireEqual(t, tt.want(), mockAppender.samples) }) } } func TestGetPromExemplars(t *testing.T) { ctx := context.Background() - everyN := &everyNTimes{n: 1} + c := NewPrometheusConverter(&mockCombinedAppender{}) t.Run("Exemplars with int value", func(t *testing.T) { - pt := pmetric.NewNumberDataPoint() - exemplar := pt.Exemplars().AppendEmpty() + es := pmetric.NewExemplarSlice() + exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetIntValue(42) - exemplars, err := getPromExemplars(ctx, everyN, pt) + exemplars, err := c.getPromExemplars(ctx, es) require.NoError(t, err) require.Len(t, exemplars, 1) require.Equal(t, float64(42), exemplars[0].Value) }) t.Run("Exemplars with double value", 
func(t *testing.T) { - pt := pmetric.NewNumberDataPoint() - exemplar := pt.Exemplars().AppendEmpty() + es := pmetric.NewExemplarSlice() + exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetDoubleValue(69.420) - exemplars, err := getPromExemplars(ctx, everyN, pt) + exemplars, err := c.getPromExemplars(ctx, es) require.NoError(t, err) require.Len(t, exemplars, 1) require.Equal(t, 69.420, exemplars[0].Value) }) t.Run("Exemplars with unsupported value type", func(t *testing.T) { - pt := pmetric.NewNumberDataPoint() - exemplar := pt.Exemplars().AppendEmpty() + es := pmetric.NewExemplarSlice() + exemplar := es.AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) - _, err := getPromExemplars(ctx, everyN, pt) + _, err := c.getPromExemplars(ctx, es) require.Error(t, err) }) } @@ -1003,3 +765,17 @@ func TestAddTypeAndUnitLabels(t *testing.T) { }) } } + +// addTypeAndUnitLabels appends type and unit labels to the given labels slice. 
+func addTypeAndUnitLabels(labels []prompb.Label, metadata prompb.MetricMetadata, settings Settings) []prompb.Label { + unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8} + + labels = slices.DeleteFunc(labels, func(l prompb.Label) bool { + return l.Name == "__type__" || l.Name == "__unit__" + }) + + labels = append(labels, prompb.Label{Name: "__type__", Value: strings.ToLower(metadata.Type.String())}) + labels = append(labels, prompb.Label{Name: "__unit__", Value: unitNamer.Build(metadata.Unit)}) + + return labels +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index b6e9ab7d70..a694d2067a 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -27,7 +27,6 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/annotations" ) @@ -36,7 +35,8 @@ const defaultZeroThreshold = 1e-128 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series // as native histogram samples. 
func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, - resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, temporality pmetric.AggregationTemporality, scope scope, + resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality, + scope scope, meta Metadata, ) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { @@ -46,34 +46,36 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont pt := dataPoints.At(x) - histogram, ws, err := exponentialToNativeHistogram(pt, temporality) + hp, ws, err := exponentialToNativeHistogram(pt, temporality) annots.Merge(ws) if err != nil { return annots, err } - lbls, err := createAttributes( + lbls, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, - metadata, + meta, model.MetricNameLabel, - metadata.MetricFamilyName, + meta.MetricFamilyName, ) - if err != nil { - return nil, err - } - ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) - - exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) if err != nil { return annots, err } - ts.Exemplars = append(ts.Exemplars, exemplars...) + ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) + if err != nil { + return annots, err + } + // OTel exponential histograms are always Int Histograms. + if err = c.appender.AppendHistogram(lbls, meta, ct, ts, hp, exemplars); err != nil { + return annots, err + } } return annots, nil @@ -81,11 +83,11 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont // exponentialToNativeHistogram translates an OTel Exponential Histogram data point // to a Prometheus Native Histogram. 
-func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) { +func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() if scale < -4 { - return prompb.Histogram{}, annots, + return nil, annots, fmt.Errorf("cannot convert exponential to native histogram."+ " Scale must be >= -4, was %d", scale) } @@ -108,41 +110,36 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, tempo // need to know here if it was used for the detection. // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 - resetHint := prompb.Histogram_UNKNOWN + resetHint := histogram.UnknownCounterReset if temporality == pmetric.AggregationTemporalityDelta { // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). // This might be changed to a different hint name as gauge type might be misleading for samples that should be // summed over time. 
- resetHint = prompb.Histogram_GAUGE + resetHint = histogram.GaugeType } - - h := prompb.Histogram{ - ResetHint: resetHint, - Schema: scale, - - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()}, + h := &histogram.Histogram{ + CounterResetHint: resetHint, + Schema: scale, // TODO use zero_threshold, if set, see // https://github.com/open-telemetry/opentelemetry-proto/pull/441 - ZeroThreshold: defaultZeroThreshold, - - PositiveSpans: pSpans, - PositiveDeltas: pDeltas, - NegativeSpans: nSpans, - NegativeDeltas: nDeltas, - - Timestamp: convertTimeStamp(p.Timestamp()), + ZeroThreshold: defaultZeroThreshold, + ZeroCount: p.ZeroCount(), + PositiveSpans: pSpans, + PositiveBuckets: pDeltas, + NegativeSpans: nSpans, + NegativeBuckets: nDeltas, } if p.Flags().NoRecordedValue() { h.Sum = math.Float64frombits(value.StaleNaN) - h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN} + h.Count = value.StaleNaN } else { if p.HasSum() { h.Sum = p.Sum() } - h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + h.Count = p.Count() if p.Count() == 0 && h.Sum != 0 { annots.Add(fmt.Errorf("exponential histogram data point has zero count, but non-zero sum: %f", h.Sum)) } @@ -167,13 +164,13 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, tempo // // When converting from OTel Explicit Histograms to Native Histograms with Custom Buckets, // the bucket indexes are not scaled, and the indices are not adjusted by 1. 
-func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]prompb.BucketSpan, []int64) { +func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]histogram.Span, []int64) { if len(bucketCounts) == 0 { return nil, nil } var ( - spans []prompb.BucketSpan + spans []histogram.Span deltas []int64 count int64 prevCount int64 @@ -196,7 +193,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust initialOffset = initialOffset>>scaleDown + 1 } - spans = append(spans, prompb.BucketSpan{ + spans = append(spans, histogram.Span{ Offset: initialOffset, Length: 0, }) @@ -217,7 +214,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust // We have to create a new span, because we have found a gap // of more than two buckets. The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 - spans = append(spans, prompb.BucketSpan{ + spans = append(spans, histogram.Span{ Offset: gap, Length: 0, }) @@ -239,7 +236,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust // We have to create a new span, because we have found a gap // of more than two buckets. 
The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 - spans = append(spans, prompb.BucketSpan{ + spans = append(spans, histogram.Span{ Offset: gap, Length: 0, }) @@ -256,7 +253,8 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust } func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, temporality pmetric.AggregationTemporality, scope scope, + resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality, + scope scope, meta Metadata, ) (annotations.Annotations, error) { var annots annotations.Annotations @@ -267,41 +265,41 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co pt := dataPoints.At(x) - histogram, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality) + hp, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality) annots.Merge(ws) if err != nil { return annots, err } - lbls, err := createAttributes( + lbls, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, - metadata, + meta, model.MetricNameLabel, - metadata.MetricFamilyName, + meta.MetricFamilyName, ) - if err != nil { - return nil, err - } - - ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) - - exemplars, err := getPromExemplars[pmetric.HistogramDataPoint](ctx, &c.everyN, pt) if err != nil { return annots, err } - ts.Exemplars = append(ts.Exemplars, exemplars...) 
+ ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) + if err != nil { + return annots, err + } + if err = c.appender.AppendHistogram(lbls, meta, ct, ts, hp, exemplars); err != nil { + return annots, err + } } return annots, nil } -func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) { +func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { var annots annotations.Annotations buckets := p.BucketCounts().AsRaw() @@ -318,23 +316,22 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, tem // need to know here if it was used for the detection. // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 - resetHint := prompb.Histogram_UNKNOWN + resetHint := histogram.UnknownCounterReset if temporality == pmetric.AggregationTemporalityDelta { // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). // This might be changed to a different hint name as gauge type might be misleading for samples that should be // summed over time. 
- resetHint = prompb.Histogram_GAUGE + resetHint = histogram.GaugeType } // TODO(carrieedwards): Add setting to limit maximum bucket count - h := prompb.Histogram{ - ResetHint: resetHint, - Schema: histogram.CustomBucketsSchema, - - PositiveSpans: positiveSpans, - PositiveDeltas: positiveDeltas, + h := &histogram.Histogram{ + CounterResetHint: resetHint, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: positiveSpans, + PositiveBuckets: positiveDeltas, // Note: OTel explicit histograms have an implicit +Inf bucket, which has a lower bound // of the last element in the explicit_bounds array. // This is similar to the custom_values array in native histograms with custom buckets. @@ -342,18 +339,16 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, tem // can be mapped directly to the custom_values array. // See: https://github.com/open-telemetry/opentelemetry-proto/blob/d7770822d70c7bd47a6891fc9faacc66fc4af3d3/opentelemetry/proto/metrics/v1/metrics.proto#L469 CustomValues: p.ExplicitBounds().AsRaw(), - - Timestamp: convertTimeStamp(p.Timestamp()), } if p.Flags().NoRecordedValue() { h.Sum = math.Float64frombits(value.StaleNaN) - h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN} + h.Count = value.StaleNaN } else { if p.HasSum() { h.Sum = p.Sum() } - h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + h.Count = p.Count() if p.Count() == 0 && h.Sum != 0 { annots.Add(fmt.Errorf("histogram data point has zero count, but non-zero sum: %f", h.Sum)) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index 9addcb70cb..7696cc35aa 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -28,11 +28,14 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" - 
"github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" ) type expectedBucketLayout struct { - wantSpans []prompb.BucketSpan + wantSpans []histogram.Span wantDeltas []int64 } @@ -52,7 +55,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 4, @@ -61,7 +64,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -1, -1, -1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 2, @@ -71,7 +74,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{7, -4}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 1, @@ -92,7 +95,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 4, @@ -101,7 +104,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -1, -1, -1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 3, @@ -110,7 +113,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, 1, -4}, // 0+4, 3+2, 1+0 = 4, 5, 1 }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 1, Length: 2, @@ -130,7 +133,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 5, Length: 4, @@ -143,7 +146,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -2, -2, 2, -1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 3, Length: 2, @@ 
-158,7 +161,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{6, -4, -1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 1, @@ -185,7 +188,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 5, Length: 4, @@ -198,7 +201,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -2, -2, 2, -1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 3, Length: 2, @@ -213,7 +216,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{6, -4, -1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 4, @@ -236,7 +239,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: -1, Length: 2, @@ -249,7 +252,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -2, 0}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 3, @@ -260,7 +263,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -4, 1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 2, @@ -282,7 +285,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: -1, Length: 6, @@ -291,7 +294,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -2, -1, 1, -1, 1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 3, @@ -302,7 +305,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{4, -3, 0}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 2, 
@@ -324,7 +327,7 @@ func TestConvertBucketsLayout(t *testing.T) { }, wantLayout: map[int32]expectedBucketLayout{ 0: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: -1, Length: 7, @@ -333,7 +336,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -3, 0, 1, -1, 0, 1}, }, 1: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 4, @@ -344,7 +347,7 @@ func TestConvertBucketsLayout(t *testing.T) { wantDeltas: []int64{3, -2, -1, 1}, }, 2: { - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 3, @@ -379,8 +382,8 @@ func TestConvertBucketsLayout(t *testing.T) { for scaleDown, wantLayout := range tt.wantLayout { t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) { gotSpans, gotDeltas := convertBucketsLayout(tt.buckets().BucketCounts().AsRaw(), tt.buckets().Offset(), scaleDown, true) - require.Equal(t, wantLayout.wantSpans, gotSpans) - require.Equal(t, wantLayout.wantDeltas, gotDeltas) + requireEqual(t, wantLayout.wantSpans, gotSpans) + requireEqual(t, wantLayout.wantDeltas, gotDeltas) }) } } @@ -418,7 +421,7 @@ func TestExponentialToNativeHistogram(t *testing.T) { tests := []struct { name string exponentialHist func() pmetric.ExponentialHistogramDataPoint - wantNativeHist func() prompb.Histogram + wantNativeHist func() *histogram.Histogram wantErrMessage string }{ { @@ -440,18 +443,17 @@ func TestExponentialToNativeHistogram(t *testing.T) { return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Sum: 10.1, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - NegativeDeltas: []int64{1, 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - PositiveDeltas: []int64{1, 0}, - Timestamp: 500, + wantNativeHist: func() 
*histogram.Histogram { + return &histogram.Histogram{ + Count: 4, + Sum: 10.1, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, + NegativeBuckets: []int64{1, 0}, + PositiveSpans: []histogram.Span{{Offset: 2, Length: 2}}, + PositiveBuckets: []int64{1, 0}, } }, }, @@ -474,17 +476,16 @@ func TestExponentialToNativeHistogram(t *testing.T) { return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - NegativeDeltas: []int64{1, 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - PositiveDeltas: []int64{1, 0}, - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, + NegativeBuckets: []int64{1, 0}, + PositiveSpans: []histogram.Span{{Offset: 2, Length: 2}}, + PositiveBuckets: []int64{1, 0}, } }, }, @@ -515,18 +516,17 @@ func TestExponentialToNativeHistogram(t *testing.T) { pt.Negative().SetOffset(2) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 6}, - Sum: 10.1, - Schema: 8, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 3}}, - PositiveDeltas: []int64{1, 0, 0}, // 1, 1, 1 - NegativeSpans: []prompb.BucketSpan{{Offset: 3, Length: 3}}, - NegativeDeltas: []int64{1, 0, 0}, // 1, 1, 1 - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 6, + Sum: 10.1, + Schema: 8, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + 
PositiveSpans: []histogram.Span{{Offset: 2, Length: 3}}, + PositiveBuckets: []int64{1, 0, 0}, // 1, 1, 1 + NegativeSpans: []histogram.Span{{Offset: 3, Length: 3}}, + NegativeBuckets: []int64{1, 0, 0}, // 1, 1, 1 } }, }, @@ -547,18 +547,17 @@ func TestExponentialToNativeHistogram(t *testing.T) { pt.Negative().SetOffset(2) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 6}, - Sum: 10.1, - Schema: 8, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, - PositiveSpans: []prompb.BucketSpan{{Offset: 1, Length: 2}}, - PositiveDeltas: []int64{1, 1}, // 0+1, 1+1 = 1, 2 - NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, - NegativeDeltas: []int64{2, -1}, // 1+1, 1+0 = 2, 1 - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 6, + Sum: 10.1, + Schema: 8, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 1, + PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}}, + PositiveBuckets: []int64{1, 1}, // 0+1, 1+1 = 1, 2 + NegativeSpans: []histogram.Span{{Offset: 2, Length: 2}}, + NegativeBuckets: []int64{2, -1}, // 1+1, 1+0 = 2, 1 } }, }, @@ -599,20 +598,18 @@ func validateExponentialHistogramCount(t *testing.T, h pmetric.ExponentialHistog require.Equal(t, h.Count(), actualCount, "exponential histogram count mismatch") } -func validateNativeHistogramCount(t *testing.T, h prompb.Histogram) { - require.NotNil(t, h.Count) - require.IsType(t, &prompb.Histogram_CountInt{}, h.Count) - want := h.Count.(*prompb.Histogram_CountInt).CountInt +func validateNativeHistogramCount(t *testing.T, h *histogram.Histogram) { + want := h.Count var ( actualCount uint64 prevBucket int64 ) - for _, delta := range h.PositiveDeltas { + for _, delta := range h.PositiveBuckets { prevBucket += delta actualCount += uint64(prevBucket) } prevBucket = 0 - for _, delta := range h.NegativeDeltas { + for _, delta := range 
h.NegativeBuckets { prevBucket += delta actualCount += uint64(prevBucket) } @@ -636,7 +633,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - wantSeries func() map[uint64]*prompb.TimeSeries + wantSeries func() []combinedHistogram }{ { name: "histogram data points with same labels and without scope promotion", @@ -665,36 +662,43 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 7}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{4, -2, -1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr", + ) + return []combinedHistogram{ + { + metricFamilyName: "test_hist", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 7, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + metricFamilyName: 
"test_hist", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{4, -2, -1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -726,41 +730,48 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - {Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: "otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 7}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{4, -2, -1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedHistogram{ + { + metricFamilyName: "test_hist", + 
ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 7, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + metricFamilyName: "test_hist", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{4, -2, -1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -792,48 +803,48 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr"}, - } - labelsAnother := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist"}, - {Name: "attr", Value: "test_attr_two"}, - } + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr", + ) + labelsAnother := labels.FromStrings( + model.MetricNameLabel, "test_hist", + "attr", "test_attr_two", + ) - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 7}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, + return []combinedHistogram{ + { + metricFamilyName: "test_hist", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: 
&histogram.Histogram{ + Count: 7, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, }, + es: []exemplar.Exemplar{{Value: 1}}, }, - timeSeriesSignature(labelsAnother): { - Labels: labelsAnother, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: 1, - ZeroThreshold: defaultZeroThreshold, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - NegativeSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - NegativeDeltas: []int64{4, -2, -1}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 2}, + { + metricFamilyName: "test_hist", + ls: labelsAnother, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 4, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: 0, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 3}}, + NegativeBuckets: []int64{4, -2, -1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -843,7 +854,8 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) namer := otlptranslator.MetricNamer{ WithMetricSuffixes: true, } @@ -856,15 +868,18 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { Settings{ PromoteScopeMetadata: tt.promoteScope, }, - prompb.MetricMetadata{MetricFamilyName: name}, pmetric.AggregationTemporalityCumulative, tt.scope, + Metadata{ + MetricFamilyName: name, + }, ) require.NoError(t, err) require.Empty(t, annots) - require.Equal(t, tt.wantSeries(), converter.unique) - require.Empty(t, converter.conflicts) + require.NoError(t, mockAppender.Commit()) + + requireEqual(t, tt.wantSeries(), mockAppender.histograms) }) } } @@ -879,7 +894,7 @@ func 
TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "zero offset", buckets: []uint64{4, 3, 2, 1}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 4, @@ -892,7 +907,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "leading empty buckets", buckets: []uint64{0, 0, 1, 1, 2, 3}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 4, @@ -905,7 +920,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "trailing empty buckets", buckets: []uint64{0, 0, 1, 1, 2, 3, 0, 0}, // TODO: add tests for 3 trailing buckets wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 6, @@ -918,7 +933,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "bucket gap of 2", buckets: []uint64{1, 2, 0, 0, 2}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 5, @@ -931,7 +946,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "bucket gap > 2", buckets: []uint64{1, 2, 0, 0, 0, 2, 4, 4}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 0, Length: 2, @@ -948,7 +963,7 @@ func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { name: "multiple bucket gaps", buckets: []uint64{0, 0, 1, 2, 0, 0, 0, 2, 4, 4, 0, 0}, wantLayout: expectedBucketLayout{ - wantSpans: []prompb.BucketSpan{ + wantSpans: []histogram.Span{ { Offset: 2, Length: 2, @@ -1007,7 +1022,7 @@ func TestHistogramToCustomBucketsHistogram(t *testing.T) { tests := []struct { name string hist func() pmetric.HistogramDataPoint - wantNativeHist func() prompb.Histogram + wantNativeHist func() *histogram.Histogram wantErrMessage string }{ { @@ -1023,15 +1038,14 @@ func 
TestHistogramToCustomBucketsHistogram(t *testing.T) { pt.ExplicitBounds().FromRaw([]float64{0, 1}) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 2}, - Sum: 10.1, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{1, 0}, - CustomValues: []float64{0, 1}, - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 2, + Sum: 10.1, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{1, 0}, + CustomValues: []float64{0, 1}, } }, }, @@ -1047,14 +1061,13 @@ func TestHistogramToCustomBucketsHistogram(t *testing.T) { pt.ExplicitBounds().FromRaw([]float64{0, 1}) return pt }, - wantNativeHist: func() prompb.Histogram { - return prompb.Histogram{ - Count: &prompb.Histogram_CountInt{CountInt: 4}, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{2, 0}, - CustomValues: []float64{0, 1}, - Timestamp: 500, + wantNativeHist: func() *histogram.Histogram { + return &histogram.Histogram{ + Count: 4, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{2, 0}, + CustomValues: []float64{0, 1}, } }, }, @@ -1093,7 +1106,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - wantSeries func() map[uint64]*prompb.TimeSeries + wantSeries func() []combinedHistogram }{ { name: "histogram data points with same labels and without scope promotion", @@ -1122,36 +1135,43 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr"}, - } - return 
map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 3}, - Sum: 3, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{2, -2, 1}, - CustomValues: []float64{5, 10}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 11}, - Sum: 5, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{3, 5, -8}, - CustomValues: []float64{0, 1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr", + ) + return []combinedHistogram{ + { + metricFamilyName: "test_hist_to_nhcb", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 3, + Sum: 3, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{2, -2, 1}, + CustomValues: []float64{5, 10}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + metricFamilyName: "test_hist_to_nhcb", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 11, + Sum: 5, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{3, 5, -8}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -1183,41 +1203,48 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - {Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: 
"otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 3}, - Sum: 3, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{2, -2, 1}, - CustomValues: []float64{5, 10}, - }, - { - Count: &prompb.Histogram_CountInt{CountInt: 11}, - Sum: 5, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, - PositiveDeltas: []int64{3, 5, -8}, - CustomValues: []float64{0, 1}, - }, + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedHistogram{ + { + metricFamilyName: "test_hist_to_nhcb", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 3, + Sum: 3, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{2, -2, 1}, + CustomValues: []float64{5, 10}, }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, - {Value: 2}, + es: []exemplar.Exemplar{{Value: 1}}, + }, + { + metricFamilyName: "test_hist_to_nhcb", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 11, + Sum: 5, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{3, 5, -8}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ -1249,48 +1276,48 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - wantSeries: func() map[uint64]*prompb.TimeSeries { - labels := 
[]prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr"}, - } - labelsAnother := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, - {Name: "attr", Value: "test_attr_two"}, - } + wantSeries: func() []combinedHistogram { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr", + ) + labelsAnother := labels.FromStrings( + model.MetricNameLabel, "test_hist_to_nhcb", + "attr", "test_attr_two", + ) - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 6}, - Sum: 3, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{4, -2}, - CustomValues: []float64{0, 1}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 1}, + return []combinedHistogram{ + { + metricFamilyName: "test_hist_to_nhcb", + ls: lbls, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 6, + Sum: 3, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{4, -2}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 1}}, }, - timeSeriesSignature(labelsAnother): { - Labels: labelsAnother, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 11}, - Sum: 5, - Schema: -53, - PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, - PositiveDeltas: []int64{3, 5}, - CustomValues: []float64{0, 1}, - }, - }, - Exemplars: []prompb.Exemplar{ - {Value: 2}, + { + metricFamilyName: "test_hist_to_nhcb", + ls: labelsAnother, + meta: metadata.Metadata{}, + t: 0, + ct: 0, + h: &histogram.Histogram{ + Count: 11, + Sum: 5, + Schema: -53, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{3, 5}, + CustomValues: []float64{0, 1}, }, + es: []exemplar.Exemplar{{Value: 2}}, }, } }, @@ 
-1300,7 +1327,8 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) namer := otlptranslator.MetricNamer{ WithMetricSuffixes: true, } @@ -1314,16 +1342,19 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { ConvertHistogramsToNHCB: true, PromoteScopeMetadata: tt.promoteScope, }, - prompb.MetricMetadata{MetricFamilyName: name}, pmetric.AggregationTemporalityCumulative, tt.scope, + Metadata{ + MetricFamilyName: name, + }, ) require.NoError(t, err) require.Empty(t, annots) - require.Equal(t, tt.wantSeries(), converter.unique) - require.Empty(t, converter.conflicts) + require.NoError(t, mockAppender.Commit()) + + requireEqual(t, tt.wantSeries(), mockAppender.histograms) }) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 7de00154a6..3dfd54cc9f 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "math" - "sort" "time" "github.com/prometheus/otlptranslator" @@ -30,7 +29,8 @@ import ( "go.uber.org/multierr" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/util/annotations" ) @@ -58,16 +58,17 @@ type Settings struct { // PrometheusConverter converts from OTel write format to Prometheus remote write format. 
type PrometheusConverter struct { - unique map[uint64]*prompb.TimeSeries - conflicts map[uint64][]*prompb.TimeSeries - everyN everyNTimes - metadata []prompb.MetricMetadata + everyN everyNTimes + scratchBuilder labels.ScratchBuilder + builder *labels.Builder + appender CombinedAppender } -func NewPrometheusConverter() *PrometheusConverter { +func NewPrometheusConverter(appender CombinedAppender) *PrometheusConverter { return &PrometheusConverter{ - unique: map[uint64]*prompb.TimeSeries{}, - conflicts: map[uint64][]*prompb.TimeSeries{}, + scratchBuilder: labels.NewScratchBuilder(0), + builder: labels.NewBuilder(labels.EmptyLabels()), + appender: appender, } } @@ -120,6 +121,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric WithMetricSuffixes: settings.AddMetricSuffixes, UTF8Allowed: settings.AllowUTF8, } + unitNamer := otlptranslator.UnitNamer{} c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() @@ -130,7 +132,6 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric numMetrics += scopeMetricsSlice.At(j).Metrics().Len() } } - c.metadata = make([]prompb.MetricMetadata, 0, numMetrics) for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) @@ -175,13 +176,14 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, err) continue } - metadata := prompb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(metric), + meta := Metadata{ + Metadata: metadata.Metadata{ + Type: otelMetricTypeToPromMetricType(metric), + Unit: unitNamer.Build(metric.Unit()), + Help: metric.Description(), + }, MetricFamilyName: promName, - Help: metric.Description(), - Unit: metric.Unit(), } - c.metadata = append(c.metadata, metadata) // handle individual metrics based on type //exhaustive:enforce @@ -192,7 +194,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = 
multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { + if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -204,7 +206,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { + if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -218,7 +220,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } if settings.ConvertHistogramsToNHCB { ws, err := c.addCustomBucketsHistogramDataPoints( - ctx, dataPoints, resource, settings, metadata, temporality, scope, + ctx, dataPoints, resource, settings, temporality, scope, meta, ) annots.Merge(ws) if err != nil { @@ -228,7 +230,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } } } else { - if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { + if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -246,9 +248,9 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric dataPoints, resource, settings, - metadata, temporality, scope, + meta, ) annots.Merge(ws) if err != nil { @@ -263,7 +265,7 @@ func (c 
*PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil { + if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil { errs = multierr.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return @@ -277,72 +279,13 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric if earliestTimestamp < pcommon.Timestamp(math.MaxUint64) { // We have at least one metric sample for this resource. // Generate a corresponding target_info series. - err := addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime(), c) - if err != nil { + if err := c.addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime()); err != nil { errs = multierr.Append(errs, err) } } } - return annots, errs -} - -func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool { - if len(ts.Labels) != len(lbls) { - return false - } - for i, l := range ts.Labels { - if l.Name != ts.Labels[i].Name || l.Value != ts.Labels[i].Value { - return false - } - } - return true -} - -// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value, -// the exemplar is added to the bucket bound's time series, provided that the time series' has samples. 
-func (c *PrometheusConverter) addExemplars(ctx context.Context, dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) error { - if len(bucketBounds) == 0 { - return nil - } - - exemplars, err := getPromExemplars(ctx, &c.everyN, dataPoint) - if err != nil { - return err - } - if len(exemplars) == 0 { - return nil - } - - sort.Sort(byBucketBoundsData(bucketBounds)) - for _, exemplar := range exemplars { - for _, bound := range bucketBounds { - if err := c.everyN.checkContext(ctx); err != nil { - return err - } - if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound { - bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar) - break - } - } - } - - return nil -} - -// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it. -// If there is no corresponding TimeSeries already, it's created. -// The corresponding TimeSeries is returned. -// If either lbls is nil/empty or sample is nil, nothing is done. -func (c *PrometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries { - if sample == nil || len(lbls) == 0 { - // This shouldn't happen - return nil - } - - ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Samples = append(ts.Samples, *sample) - return ts + return } func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAttributes { @@ -360,30 +303,43 @@ func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAtt } } -// promotedAttributes returns labels for promoted resourceAttributes. -func (s *PromoteResourceAttributes) promotedAttributes(resourceAttributes pcommon.Map) []prompb.Label { +// addPromotedAttributes adds labels for promoted resourceAttributes to the builder. 
+func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builder, resourceAttributes pcommon.Map, allowUTF8 bool) error { if s == nil { return nil } - var promotedAttrs []prompb.Label + labelNamer := otlptranslator.LabelNamer{UTF8Allowed: allowUTF8} if s.promoteAll { - promotedAttrs = make([]prompb.Label, 0, resourceAttributes.Len()) + var err error resourceAttributes.Range(func(name string, value pcommon.Value) bool { if _, exists := s.attrs[name]; !exists { - promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()}) - } - return true - }) - } else { - promotedAttrs = make([]prompb.Label, 0, len(s.attrs)) - resourceAttributes.Range(func(name string, value pcommon.Value) bool { - if _, exists := s.attrs[name]; exists { - promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()}) + var normalized string + normalized, err = labelNamer.Build(name) + if err != nil { + return false + } + if builder.Get(normalized) == "" { + builder.Set(normalized, value.AsString()) + } } return true }) + return err } - sort.Stable(ByLabelName(promotedAttrs)) - return promotedAttrs + var err error + resourceAttributes.Range(func(name string, value pcommon.Value) bool { + if _, exists := s.attrs[name]; exists { + var normalized string + normalized, err = labelNamer.Build(name) + if err != nil { + return false + } + if builder.Get(normalized) == "" { + builder.Set(normalized, value.AsString()) + } + } + return true + }) + return err } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index b03b28e4cd..6fd2c1e05e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -19,19 +19,23 @@ package prometheusremotewrite import ( "context" "fmt" - "sort" "testing" "time" + 
"github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/prompb" - "github.com/prometheus/prometheus/util/testutil" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage" ) func TestFromMetrics(t *testing.T) { @@ -77,9 +81,9 @@ func TestFromMetrics(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) payload, wantPromMetrics := createExportRequest(5, 128, 128, 2, 0, tc.settings, tc.temporality) - var expMetadata []prompb.MetricMetadata seenFamilyNames := map[string]struct{}{} for _, wantMetric := range wantPromMetrics { if _, exists := seenFamilyNames[wantMetric.familyName]; exists { @@ -90,12 +94,6 @@ func TestFromMetrics(t *testing.T) { } seenFamilyNames[wantMetric.familyName] = struct{}{} - expMetadata = append(expMetadata, prompb.MetricMetadata{ - Type: wantMetric.metricType, - MetricFamilyName: wantMetric.familyName, - Help: wantMetric.description, - Unit: wantMetric.unit, - }) } annots, err := converter.FromMetrics( @@ -106,15 +104,14 @@ func TestFromMetrics(t *testing.T) { require.NoError(t, err) require.Empty(t, annots) - testutil.RequireEqual(t, expMetadata, converter.Metadata()) + require.NoError(t, mockAppender.Commit()) - ts := converter.TimeSeries() + ts := mockAppender.samples require.Len(t, ts, 1536+1) // +1 for the target_info. 
tgtInfoCount := 0 for _, s := range ts { - b := labels.NewScratchBuilder(2) - lbls := s.ToLabels(&b, nil) + lbls := s.ls if lbls.Get(labels.MetricName) == "target_info" { tgtInfoCount++ require.Equal(t, "test-namespace/test-service", lbls.Get("job")) @@ -156,7 +153,8 @@ func TestFromMetrics(t *testing.T) { generateAttributes(h.Attributes(), "series", 1) - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), @@ -164,25 +162,21 @@ func TestFromMetrics(t *testing.T) { ) require.NoError(t, err) require.Empty(t, annots) - - series := converter.TimeSeries() + require.NoError(t, mockAppender.Commit()) if convertHistogramsToNHCB { - require.Len(t, series[0].Histograms, 1) - require.Empty(t, series[0].Samples) + require.Len(t, mockAppender.histograms, 1) + require.Empty(t, mockAppender.samples) } else { - require.Len(t, series, 3) - for i := range series { - require.Len(t, series[i].Samples, 1) - require.Nil(t, series[i].Histograms) - } + require.Empty(t, mockAppender.histograms) + require.Len(t, mockAppender.samples, 3) } }) } t.Run("context cancellation", func(t *testing.T) { settings := Settings{} - converter := NewPrometheusConverter() + converter := NewPrometheusConverter(&mockCombinedAppender{}) ctx, cancel := context.WithCancel(context.Background()) // Verify that converter.FromMetrics respects cancellation. cancel() @@ -195,7 +189,7 @@ func TestFromMetrics(t *testing.T) { t.Run("context timeout", func(t *testing.T) { settings := Settings{} - converter := NewPrometheusConverter() + converter := NewPrometheusConverter(&mockCombinedAppender{}) // Verify that converter.FromMetrics respects timeout. 
ctx, cancel := context.WithTimeout(context.Background(), 0) t.Cleanup(cancel) @@ -228,7 +222,7 @@ func TestFromMetrics(t *testing.T) { generateAttributes(h.Attributes(), "series", 10) } - converter := NewPrometheusConverter() + converter := NewPrometheusConverter(&mockCombinedAppender{}) annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{}) require.NoError(t, err) require.NotEmpty(t, annots) @@ -261,7 +255,7 @@ func TestFromMetrics(t *testing.T) { generateAttributes(h.Attributes(), "series", 10) } - converter := NewPrometheusConverter() + converter := NewPrometheusConverter(&mockCombinedAppender{}) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), @@ -292,7 +286,6 @@ func TestFromMetrics(t *testing.T) { metrics := rm.ScopeMetrics().AppendEmpty().Metrics() ts := pcommon.NewTimestampFromTime(time.Now()) - var expMetadata []prompb.MetricMetadata for i := range 3 { m := metrics.AppendEmpty() m.SetEmptyGauge() @@ -308,19 +301,10 @@ func TestFromMetrics(t *testing.T) { generateAttributes(point.Attributes(), "series", 2) curTs = curTs.Add(defaultLookbackDelta / 4) } - - namer := otlptranslator.MetricNamer{} - name, err := namer.Build(TranslatorMetricFromOtelMetric(m)) - require.NoError(t, err) - expMetadata = append(expMetadata, prompb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(m), - MetricFamilyName: name, - Help: m.Description(), - Unit: m.Unit(), - }) } - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) annots, err := converter.FromMetrics( context.Background(), request.Metrics(), @@ -330,46 +314,54 @@ func TestFromMetrics(t *testing.T) { ) require.NoError(t, err) require.Empty(t, annots) - - testutil.RequireEqual(t, expMetadata, converter.Metadata()) - - timeSeries := converter.TimeSeries() - tgtInfoCount := 0 - for _, s := range timeSeries { - b := labels.NewScratchBuilder(2) - lbls := s.ToLabels(&b, nil) - if 
lbls.Get(labels.MetricName) != "target_info" { - continue - } - - tgtInfoCount++ - require.Equal(t, "test-namespace/test-service", lbls.Get("job")) - require.Equal(t, "id1234", lbls.Get("instance")) - require.False(t, lbls.Has("service_name")) - require.False(t, lbls.Has("service_namespace")) - require.False(t, lbls.Has("service_instance_id")) - // There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart, - // then one at the latest metric timestamp. - testutil.RequireEqual(t, []prompb.Sample{ - { - Value: 1, - Timestamp: ts.AsTime().UnixMilli(), - }, - { - Value: 1, - Timestamp: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(), - }, - { - Value: 1, - Timestamp: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(), - }, - { - Value: 1, - Timestamp: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(), - }, - }, s.Samples) + require.NoError(t, mockAppender.Commit()) + require.Len(t, mockAppender.samples, 22) + // There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart, + // then one at the latest metric timestamp. 
+ targetInfoLabels := labels.FromStrings( + "__name__", "target_info", + "instance", "id1234", + "job", "test-namespace/test-service", + "resource_name_1", "value-1", + "resource_name_2", "value-2", + "resource_name_3", "value-3", + "resource_name_4", "value-4", + "resource_name_5", "value-5", + ) + targetInfoMeta := metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Target metadata", } - require.Equal(t, 1, tgtInfoCount) + requireEqual(t, []combinedSample{ + { + metricFamilyName: "target_info", + v: 1, + t: ts.AsTime().UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + { + metricFamilyName: "target_info", + v: 1, + t: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + { + metricFamilyName: "target_info", + v: 1, + t: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + { + metricFamilyName: "target_info", + v: 1, + t: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(), + ls: targetInfoLabels, + meta: targetInfoMeta, + }, + }, mockAppender.samples[len(mockAppender.samples)-4:]) }) } @@ -377,12 +369,13 @@ func TestTemporality(t *testing.T) { ts := time.Unix(100, 0) tests := []struct { - name string - allowDelta bool - convertToNHCB bool - inputSeries []pmetric.Metric - expectedSeries []prompb.TimeSeries - expectedError string + name string + allowDelta bool + convertToNHCB bool + inputSeries []pmetric.Metric + expectedSamples []combinedSample + expectedHistograms []combinedHistogram + expectedError string }{ { name: "all cumulative when delta not allowed", @@ -391,9 +384,9 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), - createPromFloatSeries("test_metric_2", ts), + 
expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), + createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter), }, }, { @@ -403,9 +396,9 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), - createPromFloatSeries("test_metric_2", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown), + createPromFloatSeries("test_metric_2", ts, model.MetricTypeUnknown), }, }, { @@ -415,9 +408,9 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), - createPromFloatSeries("test_metric_2", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeUnknown), + createPromFloatSeries("test_metric_2", ts, model.MetricTypeCounter), }, }, { @@ -427,8 +420,8 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_metric_1", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), }, expectedError: `invalid temporality and type combination for metric "test_metric_2"`, }, @@ -439,8 +432,8 @@ func TestTemporality(t *testing.T) { createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts), createOtelSum("test_metric_2", pmetric.AggregationTemporalityUnspecified, ts), }, - expectedSeries: []prompb.TimeSeries{ - 
createPromFloatSeries("test_metric_1", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_metric_1", ts, model.MetricTypeCounter), }, expectedError: `invalid temporality and type combination for metric "test_metric_2"`, }, @@ -450,8 +443,8 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelExponentialHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNativeHistogramSeries("test_histogram", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNativeHistogramSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -461,9 +454,9 @@ func TestTemporality(t *testing.T) { createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNativeHistogramSeries("test_histogram_1", prompb.Histogram_GAUGE, ts), - createPromNativeHistogramSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNativeHistogramSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown), + createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -473,8 +466,8 @@ func TestTemporality(t *testing.T) { createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNativeHistogramSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNativeHistogramSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, 
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, @@ -485,8 +478,8 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelExplicitHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNHCBSeries("test_histogram", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNHCBSeries("test_histogram", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -497,9 +490,9 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNHCBSeries("test_histogram_1", prompb.Histogram_GAUGE, ts), - createPromNHCBSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNHCBSeries("test_histogram_1", histogram.GaugeType, ts, model.MetricTypeUnknown), + createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, }, { @@ -510,8 +503,8 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromNHCBSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts), + expectedHistograms: []combinedHistogram{ + createPromNHCBSeries("test_histogram_2", histogram.UnknownCounterReset, ts, model.MetricTypeHistogram), }, expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, @@ -523,8 +516,8 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), 
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: createPromClassicHistogramSeries("test_histogram_2", ts), - expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, + expectedSamples: createPromClassicHistogramSeries("test_histogram_2", ts, model.MetricTypeHistogram), + expectedError: `invalid temporality and type combination for metric "test_histogram_1"`, }, { name: "delta histogram with buckets and convertToNHCB=false when allowed", @@ -534,9 +527,9 @@ func TestTemporality(t *testing.T) { createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts), createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts), }, - expectedSeries: append( - createPromClassicHistogramSeries("test_histogram_1", ts), - createPromClassicHistogramSeries("test_histogram_2", ts)..., + expectedSamples: append( + createPromClassicHistogramSeries("test_histogram_1", ts, model.MetricTypeUnknown), + createPromClassicHistogramSeries("test_histogram_2", ts, model.MetricTypeHistogram)..., ), }, { @@ -544,15 +537,15 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelSummary("test_summary_1", ts), }, - expectedSeries: createPromSummarySeries("test_summary_1", ts), + expectedSamples: createPromSummarySeries("test_summary_1", ts), }, { name: "gauge does not have temporality", inputSeries: []pmetric.Metric{ createOtelGauge("test_gauge_1", ts), }, - expectedSeries: []prompb.TimeSeries{ - createPromFloatSeries("test_gauge_1", ts), + expectedSamples: []combinedSample{ + createPromFloatSeries("test_gauge_1", ts, model.MetricTypeGauge), }, }, { @@ -560,8 +553,7 @@ func TestTemporality(t *testing.T) { inputSeries: []pmetric.Metric{ createOtelEmptyType("test_empty"), }, - expectedSeries: []prompb.TimeSeries{}, - expectedError: `could not get aggregation temporality for test_empty as it has unsupported metric type 
Empty`, + expectedError: `could not get aggregation temporality for test_empty as it has unsupported metric type Empty`, }, } @@ -575,7 +567,8 @@ func TestTemporality(t *testing.T) { s.CopyTo(sm.Metrics().AppendEmpty()) } - c := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + c := NewPrometheusConverter(mockAppender) settings := Settings{ AllowDeltaTemporality: tc.allowDelta, ConvertHistogramsToNHCB: tc.convertToNHCB, @@ -588,11 +581,11 @@ func TestTemporality(t *testing.T) { } else { require.NoError(t, err) } - - series := c.TimeSeries() + require.NoError(t, mockAppender.Commit()) // Sort series to make the test deterministic. - testutil.RequireEqual(t, sortTimeSeries(tc.expectedSeries), sortTimeSeries(series)) + requireEqual(t, tc.expectedSamples, mockAppender.samples) + requireEqual(t, tc.expectedHistograms, mockAppender.histograms) }) } } @@ -603,6 +596,7 @@ func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts t m.SetName(name) sum := m.SetEmptySum() sum.SetAggregationTemporality(temporality) + sum.SetIsMonotonic(true) dp := sum.DataPoints().AppendEmpty() dp.SetDoubleValue(5) dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) @@ -610,16 +604,15 @@ func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts t return m } -func createPromFloatSeries(name string, ts time.Time) prompb.TimeSeries { - return prompb.TimeSeries{ - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "test_label", Value: "test_value"}, +func createPromFloatSeries(name string, ts time.Time, typ model.MetricType) combinedSample { + return combinedSample{ + metricFamilyName: name, + ls: labels.FromStrings("__name__", name, "test_label", "test_value"), + t: ts.UnixMilli(), + v: 5, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{ - Value: 5, - Timestamp: ts.UnixMilli(), - }}, } } @@ -649,22 +642,21 @@ func createOtelExponentialHistogram(name string, temporality pmetric.Aggregation return m } 
-func createPromNativeHistogramSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries { - return prompb.TimeSeries{ - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "test_label", Value: "test_value"}, +func createPromNativeHistogramSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram { + return combinedHistogram{ + metricFamilyName: name, + ls: labels.FromStrings("__name__", name, "test_label", "test_value"), + t: ts.UnixMilli(), + meta: metadata.Metadata{ + Type: typ, }, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 1}, - Sum: 5, - Schema: 0, - ZeroThreshold: 1e-128, - ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, - Timestamp: ts.UnixMilli(), - ResetHint: hint, - }, + h: &histogram.Histogram{ + Count: 1, + Sum: 5, + Schema: 0, + ZeroThreshold: 1e-128, + ZeroCount: 0, + CounterResetHint: hint, }, } } @@ -685,72 +677,77 @@ func createOtelExplicitHistogram(name string, temporality pmetric.AggregationTem return m } -func createPromNHCBSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries { - return prompb.TimeSeries{ - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "test_label", Value: "test_value"}, +func createPromNHCBSeries(name string, hint histogram.CounterResetHint, ts time.Time, typ model.MetricType) combinedHistogram { + return combinedHistogram{ + metricFamilyName: name, + ls: labels.FromStrings("__name__", name, "test_label", "test_value"), + meta: metadata.Metadata{ + Type: typ, }, - Histograms: []prompb.Histogram{ - { - Count: &prompb.Histogram_CountInt{CountInt: 20}, - Sum: 30, - Schema: -53, - ZeroThreshold: 0, - ZeroCount: nil, - PositiveSpans: []prompb.BucketSpan{ - { - Length: 3, - }, + t: ts.UnixMilli(), + h: &histogram.Histogram{ + Count: 20, + Sum: 30, + Schema: -53, + ZeroThreshold: 0, + PositiveSpans: []histogram.Span{ + { + Length: 3, }, - 
PositiveDeltas: []int64{10, 0, -10}, - CustomValues: []float64{1, 2}, - Timestamp: ts.UnixMilli(), - ResetHint: hint, }, + PositiveBuckets: []int64{10, 0, -10}, + CustomValues: []float64{1, 2}, + CounterResetHint: hint, }, } } -func createPromClassicHistogramSeries(name string, ts time.Time) []prompb.TimeSeries { - return []prompb.TimeSeries{ +func createPromClassicHistogramSeries(name string, ts time.Time, typ model.MetricType) []combinedSample { + return []combinedSample{ { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_bucket"}, - {Name: "le", Value: "1"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 30, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 10, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_bucket"}, - {Name: "le", Value: "2"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 20, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_bucket"}, - {Name: "le", Value: "+Inf"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name+"_bucket", "le", "1", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 10, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_count"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name+"_bucket", "le", "2", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 20, + meta: metadata.Metadata{ + Type: typ, }, 
- Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_sum"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name+"_bucket", "le", "+Inf", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 20, + meta: metadata.Metadata{ + Type: typ, }, - Samples: []prompb.Sample{{Value: 30, Timestamp: ts.UnixMilli()}}, }, } } @@ -771,38 +768,34 @@ func createOtelSummary(name string, ts time.Time) pmetric.Metric { return m } -func createPromSummarySeries(name string, ts time.Time) []prompb.TimeSeries { - return []prompb.TimeSeries{ +func createPromSummarySeries(name string, ts time.Time) []combinedSample { + return []combinedSample{ { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_sum"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name+"_sum", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 18, + meta: metadata.Metadata{ + Type: model.MetricTypeSummary, }, - Samples: []prompb.Sample{{ - Value: 18, - Timestamp: ts.UnixMilli(), - }}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name + "_count"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name+"_count", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 9, + meta: metadata.Metadata{ + Type: model.MetricTypeSummary, }, - Samples: []prompb.Sample{{ - Value: 9, - Timestamp: ts.UnixMilli(), - }}, }, { - Labels: []prompb.Label{ - {Name: "__name__", Value: name}, - {Name: "quantile", Value: "0.5"}, - {Name: "test_label", Value: "test_value"}, + metricFamilyName: name, + ls: labels.FromStrings("__name__", name, "quantile", "0.5", "test_label", "test_value"), + t: ts.UnixMilli(), + v: 2, + meta: metadata.Metadata{ + Type: model.MetricTypeSummary, }, - Samples: []prompb.Sample{{ - Value: 2, - Timestamp: ts.UnixMilli(), - }}, }, } } 
@@ -814,20 +807,6 @@ func createOtelEmptyType(name string) pmetric.Metric { return m } -func sortTimeSeries(series []prompb.TimeSeries) []prompb.TimeSeries { - for i := range series { - sort.Slice(series[i].Labels, func(j, k int) bool { - return series[i].Labels[j].Name < series[i].Labels[k].Name - }) - } - - sort.Slice(series, func(i, j int) bool { - return fmt.Sprint(series[i].Labels) < fmt.Sprint(series[j].Labels) - }) - - return series -} - func TestTranslatorMetricFromOtelMetric(t *testing.T) { tests := []struct { name string @@ -989,19 +968,23 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { settings, pmetric.AggregationTemporalityCumulative, ) + appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry()) + noOpLogger := promslog.NewNopLogger() b.ResetTimer() for range b.N { - converter := NewPrometheusConverter() + app := &noOpAppender{} + mockAppender := NewCombinedAppender(app, noOpLogger, false, appMetrics) + converter := NewPrometheusConverter(mockAppender) annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings) require.NoError(b, err) require.Empty(b, annots) if histogramCount+nonHistogramCount > 0 { - require.NotEmpty(b, converter.TimeSeries()) - require.NotEmpty(b, converter.Metadata()) + require.Positive(b, app.samples+app.histograms) + require.Positive(b, app.metadata) } else { - require.Empty(b, converter.TimeSeries()) - require.Empty(b, converter.Metadata()) + require.Zero(b, app.samples+app.histograms) + require.Zero(b, app.metadata) } } }) @@ -1016,10 +999,57 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { } } +type noOpAppender struct { + samples int + histograms int + metadata int +} + +var _ storage.Appender = &noOpAppender{} + +func (a *noOpAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { + a.samples++ + return 1, nil +} + +func (*noOpAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) 
(storage.SeriesRef, error) { + return 1, nil +} + +func (a *noOpAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.histograms++ + return 1, nil +} + +func (*noOpAppender) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 1, nil +} + +func (a *noOpAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + a.metadata++ + return 1, nil +} + +func (*noOpAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { + return 1, nil +} + +func (*noOpAppender) Commit() error { + return nil +} + +func (*noOpAppender) Rollback() error { + return nil +} + +func (*noOpAppender) SetOptions(_ *storage.AppendOptions) { + panic("not implemented") +} + type wantPrometheusMetric struct { name string familyName string - metricType prompb.MetricMetadata_MetricType + metricType model.MetricType description string unit string } @@ -1066,11 +1096,11 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou generateAttributes(h.Attributes(), "series", labelsPerMetric) generateExemplars(h.Exemplars(), exemplarsPerSeries, ts) - metricType := prompb.MetricMetadata_HISTOGRAM + metricType := model.MetricTypeHistogram if temporality != pmetric.AggregationTemporalityCumulative { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. 
- metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("histogram_%d%s_bucket", i, suffix), @@ -1108,11 +1138,11 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou generateAttributes(point.Attributes(), "series", labelsPerMetric) generateExemplars(point.Exemplars(), exemplarsPerSeries, ts) - metricType := prompb.MetricMetadata_GAUGE + metricType := model.MetricTypeGauge if temporality != pmetric.AggregationTemporalityCumulative { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. - metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("non_monotonic_sum_%d%s", i, suffix), @@ -1142,11 +1172,11 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou counterSuffix = suffix + "_total" } - metricType := prompb.MetricMetadata_COUNTER + metricType := model.MetricTypeCounter if temporality != pmetric.AggregationTemporalityCumulative { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. 
- metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("monotonic_sum_%d%s", i, counterSuffix), @@ -1172,7 +1202,7 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou wantPromMetrics = append(wantPromMetrics, wantPrometheusMetric{ name: fmt.Sprintf("gauge_%d%s", i, suffix), familyName: fmt.Sprintf("gauge_%d%s", i, suffix), - metricType: prompb.MetricMetadata_GAUGE, + metricType: model.MetricTypeGauge, unit: "unit", description: "gauge", }) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 849a73d987..cdae978736 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -25,11 +25,10 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" ) func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope, + resource pcommon.Resource, settings Settings, scope scope, meta Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -37,42 +36,42 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } pt := dataPoints.At(x) - labels, err := createAttributes( + labels, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, - metadata, + meta, model.MetricNameLabel, - metadata.MetricFamilyName, + meta.MetricFamilyName, ) if err != nil { return err } - sample := &prompb.Sample{ - // convert ns to ms - Timestamp: convertTimeStamp(pt.Timestamp()), - } + var val 
 float64 switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: - sample.Value = float64(pt.IntValue()) + val = float64(pt.IntValue()) case pmetric.NumberDataPointValueTypeDouble: - sample.Value = pt.DoubleValue() + val = pt.DoubleValue() } if pt.Flags().NoRecordedValue() { - sample.Value = math.Float64frombits(value.StaleNaN) + val = math.Float64frombits(value.StaleNaN) + } + ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + if err := c.appender.AppendSample(labels, meta, ct, ts, val, nil); err != nil { + return err } - - c.addSample(sample, labels) } return nil } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope, + resource pcommon.Resource, settings Settings, scope scope, meta Metadata, ) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -80,41 +79,38 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo } pt := dataPoints.At(x) - lbls, err := createAttributes( + lbls, err := c.createAttributes( resource, pt.Attributes(), scope, settings, nil, true, - metadata, + meta, model.MetricNameLabel, - metadata.MetricFamilyName, + meta.MetricFamilyName, ) + if err != nil { + return err + } + var val float64 + switch pt.ValueType() { + case pmetric.NumberDataPointValueTypeInt: + val = float64(pt.IntValue()) + case pmetric.NumberDataPointValueTypeDouble: + val = pt.DoubleValue() + } + if pt.Flags().NoRecordedValue() { + val = math.Float64frombits(value.StaleNaN) + } + ts := convertTimeStamp(pt.Timestamp()) + ct := convertTimeStamp(pt.StartTimestamp()) + exemplars, err := c.getPromExemplars(ctx, pt.Exemplars()) if err != nil { return err } - sample := &prompb.Sample{ - // convert ns to ms - Timestamp: convertTimeStamp(pt.Timestamp()), - } - switch pt.ValueType() { - case 
pmetric.NumberDataPointValueTypeInt: - sample.Value = float64(pt.IntValue()) - case pmetric.NumberDataPointValueTypeDouble: - sample.Value = pt.DoubleValue() - } - if pt.Flags().NoRecordedValue() { - sample.Value = math.Float64frombits(value.StaleNaN) - } - - ts := c.addSample(sample, lbls) - if ts != nil { - exemplars, err := getPromExemplars[pmetric.NumberDataPoint](ctx, &c.everyN, pt) - if err != nil { - return err - } - ts.Exemplars = append(ts.Exemplars, exemplars...) + if err := c.appender.AppendSample(lbls, meta, ct, ts, val, exemplars); err != nil { + return err } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go index 4a298e166c..3e918eecbd 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -26,7 +26,9 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" - "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" ) func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { @@ -47,7 +49,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - want func() map[uint64]*prompb.TimeSeries + want func() []combinedSample }{ { name: "gauge without scope promotion", @@ -60,19 +62,17 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { 
- Value: 1, - Timestamp: convertTimeStamp(pcommon.Timestamp(ts)), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + ) + return []combinedSample{ + { + metricFamilyName: "test", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(pcommon.Timestamp(ts)), + v: 1, }, } }, @@ -88,24 +88,22 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - {Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: "otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { - Value: 1, - Timestamp: convertTimeStamp(pcommon.Timestamp(ts)), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedSample{ + { + metricFamilyName: "test", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(pcommon.Timestamp(ts)), + v: 1, }, } }, @@ -114,7 +112,8 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) converter.addGaugeNumberDataPoints( context.Background(), @@ -123,12 +122,14 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { 
Settings{ PromoteScopeMetadata: tt.promoteScope, }, - prompb.MetricMetadata{MetricFamilyName: metric.Name()}, tt.scope, + Metadata{ + MetricFamilyName: metric.Name(), + }, ) + require.NoError(t, mockAppender.Commit()) - require.Equal(t, tt.want(), converter.unique) - require.Empty(t, converter.conflicts) + requireEqual(t, tt.want(), mockAppender.samples) }) } } @@ -151,7 +152,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { metric func() pmetric.Metric scope scope promoteScope bool - want func() map[uint64]*prompb.TimeSeries + want func() []combinedSample }{ { name: "sum without scope promotion", @@ -165,19 +166,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { - Value: 1, - Timestamp: convertTimeStamp(ts), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + ) + return []combinedSample{ + { + metricFamilyName: "test", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 1, }, } }, @@ -194,24 +193,22 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: true, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - {Name: "otel_scope_name", Value: defaultScope.name}, - {Name: "otel_scope_schema_url", Value: defaultScope.schemaURL}, - {Name: "otel_scope_version", Value: defaultScope.version}, - {Name: "otel_scope_attr1", Value: "value1"}, - {Name: "otel_scope_attr2", Value: "value2"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - { - Value: 1, - Timestamp: 
convertTimeStamp(ts), - }, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + "otel_scope_name", defaultScope.name, + "otel_scope_schema_url", defaultScope.schemaURL, + "otel_scope_version", defaultScope.version, + "otel_scope_attr1", "value1", + "otel_scope_attr2", "value2", + ) + return []combinedSample{ + { + metricFamilyName: "test", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 1, }, } }, @@ -230,18 +227,18 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{{ - Value: 1, - Timestamp: convertTimeStamp(ts), - }}, - Exemplars: []prompb.Exemplar{ + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test", + ) + return []combinedSample{ + { + metricFamilyName: "test", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 1, + es: []exemplar.Exemplar{ {Value: 2}, }, }, @@ -265,16 +262,18 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_sum"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 1, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_sum", + ) + return []combinedSample{ + { + metricFamilyName: "test_sum", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + ct: convertTimeStamp(ts), + v: 1, }, } }, @@ -294,16 +293,17 @@ func 
TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_sum"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_sum", + ) + return []combinedSample{ + { + metricFamilyName: "test_sum", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 0, }, } }, @@ -323,16 +323,17 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { }, scope: defaultScope, promoteScope: false, - want: func() map[uint64]*prompb.TimeSeries { - labels := []prompb.Label{ - {Name: model.MetricNameLabel, Value: "test_sum"}, - } - return map[uint64]*prompb.TimeSeries{ - timeSeriesSignature(labels): { - Labels: labels, - Samples: []prompb.Sample{ - {Value: 0, Timestamp: convertTimeStamp(ts)}, - }, + want: func() []combinedSample { + lbls := labels.FromStrings( + model.MetricNameLabel, "test_sum", + ) + return []combinedSample{ + { + metricFamilyName: "test_sum", + ls: lbls, + meta: metadata.Metadata{}, + t: convertTimeStamp(ts), + v: 0, }, } }, @@ -341,7 +342,8 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { metric := tt.metric() - converter := NewPrometheusConverter() + mockAppender := &mockCombinedAppender{} + converter := NewPrometheusConverter(mockAppender) converter.addSumNumberDataPoints( context.Background(), @@ -350,12 +352,14 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { Settings{ PromoteScopeMetadata: tt.promoteScope, }, - prompb.MetricMetadata{MetricFamilyName: metric.Name()}, tt.scope, + Metadata{ + MetricFamilyName: metric.Name(), + }, ) + require.NoError(t, 
mockAppender.Commit()) - require.Equal(t, tt.want(), converter.unique) - require.Empty(t, converter.conflicts) + requireEqual(t, tt.want(), mockAppender.samples) }) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index 716a6cd6f9..49f96e0019 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -17,42 +17,41 @@ package prometheusremotewrite import ( + "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pmetric" - - "github.com/prometheus/prometheus/prompb" ) -func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMetadata_MetricType { +func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) model.MetricType { switch otelMetric.Type() { case pmetric.MetricTypeGauge: - return prompb.MetricMetadata_GAUGE + return model.MetricTypeGauge case pmetric.MetricTypeSum: - metricType := prompb.MetricMetadata_GAUGE + metricType := model.MetricTypeGauge if otelMetric.Sum().IsMonotonic() { - metricType = prompb.MetricMetadata_COUNTER + metricType = model.MetricTypeCounter } // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. if otelMetric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta { - metricType = prompb.MetricMetadata_UNKNOWN + metricType = model.MetricTypeUnknown } return metricType case pmetric.MetricTypeHistogram: // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. 
if otelMetric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { - return prompb.MetricMetadata_UNKNOWN + return model.MetricTypeUnknown } - return prompb.MetricMetadata_HISTOGRAM + return model.MetricTypeHistogram case pmetric.MetricTypeSummary: - return prompb.MetricMetadata_SUMMARY + return model.MetricTypeSummary case pmetric.MetricTypeExponentialHistogram: if otelMetric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. - return prompb.MetricMetadata_UNKNOWN + return model.MetricTypeUnknown } - return prompb.MetricMetadata_HISTOGRAM + return model.MetricTypeHistogram } - return prompb.MetricMetadata_UNKNOWN + return model.MetricTypeUnknown } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go b/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go deleted file mode 100644 index abffbe6105..0000000000 --- a/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// Provenance-includes-location: -// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheusremotewrite - -import ( - "github.com/prometheus/prometheus/prompb" -) - -// TimeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format. -func (c *PrometheusConverter) TimeSeries() []prompb.TimeSeries { - conflicts := 0 - for _, ts := range c.conflicts { - conflicts += len(ts) - } - allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts) - for _, ts := range c.unique { - allTS = append(allTS, *ts) - } - for _, cTS := range c.conflicts { - for _, ts := range cTS { - allTS = append(allTS, *ts) - } - } - - return allTS -} - -// Metadata returns a slice of the prompb.Metadata that were converted from OTel format. -func (c *PrometheusConverter) Metadata() []prompb.MetricMetadata { - return c.metadata -} diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 7c24a102d5..ce4c569715 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -537,25 +537,29 @@ type OTLPOptions struct { LookbackDelta time.Duration // Add type and unit labels to the metrics. EnableTypeAndUnitLabels bool + // IngestCTZeroSample enables writing zero samples based on the start time + // of metrics. + IngestCTZeroSample bool } // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. 
-func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { if opts.NativeDelta && opts.ConvertDelta { // This should be validated when iterating through feature flags, so not expected to fail here. panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time") } ex := &rwExporter{ - writeHandler: &writeHandler{ - logger: logger, - appendable: appendable, - }, + logger: logger, + appendable: appendable, config: configFunc, allowDeltaTemporality: opts.NativeDelta, lookbackDelta: opts.LookbackDelta, + ingestCTZeroSample: opts.IngestCTZeroSample, enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels, + // Register metrics. + metrics: otlptranslator.NewCombinedAppenderMetrics(reg), } wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex} @@ -589,18 +593,26 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl } type rwExporter struct { - *writeHandler + logger *slog.Logger + appendable storage.Appendable config func() config.Config allowDeltaTemporality bool lookbackDelta time.Duration + ingestCTZeroSample bool enableTypeAndUnitLabels bool + + // Metrics. 
+ metrics otlptranslator.CombinedAppenderMetrics } func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { otlpCfg := rw.config().OTLPConfig - - converter := otlptranslator.NewPrometheusConverter() - + app := &timeLimitAppender{ + Appender: rw.appendable.Appender(ctx), + maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), + } + combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestCTZeroSample, rw.metrics) + converter := otlptranslator.NewPrometheusConverter(combinedAppender) annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(), AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(), @@ -612,18 +624,18 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er LookbackDelta: rw.lookbackDelta, EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels, }) - if err != nil { - rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) - } + + defer func() { + if err != nil { + _ = app.Rollback() + return + } + err = app.Commit() + }() ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } - - err = rw.write(ctx, &prompb.WriteRequest{ - Timeseries: converter.TimeSeries(), - Metadata: converter.Metadata(), - }) return err } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 48b8a377cd..f50106b3d4 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -948,8 +948,8 @@ func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v if m.appendSampleErr != nil { return 0, m.appendSampleErr } - - latestTs := m.latestSample[l.Hash()] + hash := l.Hash() + latestTs := m.latestSample[hash] if t < latestTs { return 0, storage.ErrOutOfOrderSample } @@ -964,9 +964,9 @@ func (m *mockAppendable) 
Append(_ storage.SeriesRef, l labels.Labels, t int64, v return 0, tsdb.ErrInvalidSample } - m.latestSample[l.Hash()] = t + m.latestSample[hash] = t m.samples = append(m.samples, mockSample{l, t, v}) - return 0, nil + return storage.SeriesRef(hash), nil } func (m *mockAppendable) Commit() error { @@ -984,12 +984,12 @@ func (m *mockAppendable) Rollback() error { return nil } -func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { +func (m *mockAppendable) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { if m.appendExemplarErr != nil { return 0, m.appendExemplarErr } - latestTs := m.latestExemplar[l.Hash()] + latestTs := m.latestExemplar[uint64(ref)] if e.Ts < latestTs { return 0, storage.ErrOutOfOrderExemplar } @@ -997,21 +997,21 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e return 0, storage.ErrDuplicateExemplar } - m.latestExemplar[l.Hash()] = e.Ts + m.latestExemplar[uint64(ref)] = e.Ts m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value}) - return 0, nil + return ref, nil } func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if m.appendHistogramErr != nil { return 0, m.appendHistogramErr } - + hash := l.Hash() var latestTs int64 if h != nil { - latestTs = m.latestHistogram[l.Hash()] + latestTs = m.latestHistogram[hash] } else { - latestTs = m.latestFloatHist[l.Hash()] + latestTs = m.latestFloatHist[hash] } if t < latestTs { return 0, storage.ErrOutOfOrderSample @@ -1028,12 +1028,12 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t } if h != nil { - m.latestHistogram[l.Hash()] = t + m.latestHistogram[hash] = t } else { - m.latestFloatHist[l.Hash()] = t + m.latestFloatHist[hash] = t } m.histograms = append(m.histograms, 
mockHistogram{l, t, h, fh}) - return 0, nil + return storage.SeriesRef(hash), nil } func (m *mockAppendable) AppendHistogramCTZeroSample(_ storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { @@ -1045,12 +1045,12 @@ func (m *mockAppendable) AppendHistogramCTZeroSample(_ storage.SeriesRef, l labe if ct > t { return 0, storage.ErrOutOfOrderSample } - + hash := l.Hash() var latestTs int64 if h != nil { - latestTs = m.latestHistogram[l.Hash()] + latestTs = m.latestHistogram[hash] } else { - latestTs = m.latestFloatHist[l.Hash()] + latestTs = m.latestFloatHist[hash] } if ct < latestTs { return 0, storage.ErrOutOfOrderSample @@ -1068,22 +1068,22 @@ func (m *mockAppendable) AppendHistogramCTZeroSample(_ storage.SeriesRef, l labe } if h != nil { - m.latestHistogram[l.Hash()] = ct + m.latestHistogram[hash] = ct m.histograms = append(m.histograms, mockHistogram{l, ct, &histogram.Histogram{}, nil}) } else { - m.latestFloatHist[l.Hash()] = ct + m.latestFloatHist[hash] = ct m.histograms = append(m.histograms, mockHistogram{l, ct, nil, &histogram.FloatHistogram{}}) } - return 0, nil + return storage.SeriesRef(hash), nil } -func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) { +func (m *mockAppendable) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) { if m.updateMetadataErr != nil { return 0, m.updateMetadataErr } m.metadata = append(m.metadata, mockMetadata{l: l, m: mp}) - return 0, nil + return ref, nil } func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { @@ -1095,8 +1095,8 @@ func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels if ct > t { return 0, storage.ErrOutOfOrderSample } - - latestTs := m.latestSample[l.Hash()] + hash := l.Hash() + latestTs := m.latestSample[hash] 
if ct < latestTs { return 0, storage.ErrOutOfOrderSample } @@ -1111,7 +1111,7 @@ func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels return 0, tsdb.ErrInvalidSample } - m.latestSample[l.Hash()] = ct + m.latestSample[hash] = ct m.samples = append(m.samples, mockSample{l, ct, 0}) - return 0, nil + return storage.SeriesRef(hash), nil } diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 7496a2c78c..dd9ea0282c 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -44,6 +44,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/storage" ) @@ -387,12 +388,14 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) { func TestOTLPWriteHandler(t *testing.T) { timestamp := time.Now() - exportRequest := generateOTLPWriteRequest(timestamp) + var zeroTime time.Time + exportRequest := generateOTLPWriteRequest(timestamp, zeroTime) for _, testCase := range []struct { name string otlpCfg config.OTLPConfig typeAndUnitLabels bool expectedSamples []mockSample + expectedMetadata []mockMetadata }{ { name: "NoTranslation/NoTypeAndUnitLabels", @@ -401,24 +404,70 @@ func TestOTLPWriteHandler(t *testing.T) { }, expectedSamples: []mockSample{ { - l: labels.New(labels.Label{Name: "__name__", Value: "test.counter"}, - labels.Label{Name: "foo.bar", Value: "baz"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}), + l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { - l: labels.New( - labels.Label{Name: "__name__", Value: "target_info"}, - labels.Label{Name: "host.name", Value: "test-host"}, - 
labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}, - ), + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, + expectedMetadata: []mockMetadata{ + { + l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", 
"job", "test-service", "le", "2"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, + }, + }, }, { name: "NoTranslation/WithTypeAndUnitLabels", @@ -428,26 +477,71 @@ func TestOTLPWriteHandler(t *testing.T) { typeAndUnitLabels: true, expectedSamples: []mockSample{ { - l: labels.New(labels.Label{Name: "__name__", Value: "test.counter"}, - 
labels.Label{Name: "__type__", Value: "counter"}, - labels.Label{Name: "__unit__", Value: "bytes"}, - labels.Label{Name: "foo.bar", Value: "baz"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}), + l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { - l: labels.New( - labels.Label{Name: "__name__", Value: "target_info"}, - labels.Label{Name: "host.name", Value: "test-host"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}, - ), + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, + expectedMetadata: []mockMetadata{ + { + // Metadata labels follow series labels. + l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", 
"instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + m: metadata.Metadata{Type: 
model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, + }, + }, }, { name: "UnderscoreEscapingWithSuffixes/NoTypeAndUnitLabels", @@ -456,24 +550,71 @@ func TestOTLPWriteHandler(t *testing.T) { }, expectedSamples: []mockSample{ { - l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_bytes_total"}, - labels.Label{Name: "foo_bar", Value: "baz"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}), + l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { - l: labels.New( - labels.Label{Name: "__name__", Value: "target_info"}, - labels.Label{Name: "host_name", Value: "test-host"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}, - ), + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, + 
expectedMetadata: []mockMetadata{ + // All get _bytes unit suffix and counter also gets _total. + { + l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: 
labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, + }, + }, }, { name: "UnderscoreEscapingWithoutSuffixes", @@ -482,24 +623,70 @@ func TestOTLPWriteHandler(t *testing.T) { }, expectedSamples: []mockSample{ { - l: labels.New(labels.Label{Name: "__name__", Value: "test_counter"}, - labels.Label{Name: "foo_bar", Value: "baz"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: 
"test-service"}), + l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { - l: labels.New( - labels.Label{Name: "__name__", Value: "target_info"}, - labels.Label{Name: "host_name", Value: "test-host"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}, - ), + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, + expectedMetadata: []mockMetadata{ + { + l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, 
"test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), 
+ m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, + }, + }, }, { name: "UnderscoreEscapingWithSuffixes/WithTypeAndUnitLabels", @@ -529,6 +716,60 @@ func TestOTLPWriteHandler(t *testing.T) { v: 1, }, }, + expectedMetadata: []mockMetadata{ + { + l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", 
"test-service", "le", "1"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: 
model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, + }, + }, }, { name: "NoUTF8EscapingWithSuffixes/NoTypeAndUnitLabels", @@ -537,24 +778,71 @@ func TestOTLPWriteHandler(t *testing.T) { }, expectedSamples: []mockSample{ { - l: labels.New(labels.Label{Name: "__name__", Value: "test.counter_bytes_total"}, - labels.Label{Name: "foo.bar", Value: "baz"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}), + l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { - l: labels.New( - labels.Label{Name: "__name__", Value: "target_info"}, - labels.Label{Name: "host.name", Value: "test-host"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}, - ), + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, + expectedMetadata: []mockMetadata{ + // All get _bytes unit suffix and counter also gets _total. 
+ { + l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", 
"job", "test-service", "le", "3"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, + }, + }, }, { name: "NoUTF8EscapingWithSuffixes/WithTypeAndUnitLabels", @@ -564,40 +852,268 @@ func TestOTLPWriteHandler(t *testing.T) { typeAndUnitLabels: true, expectedSamples: []mockSample{ { - l: labels.New(labels.Label{Name: "__name__", Value: "test.counter_bytes_total"}, - labels.Label{Name: "__type__", Value: "counter"}, - labels.Label{Name: "__unit__", Value: "bytes"}, - labels.Label{Name: "foo.bar", Value: "baz"}, - labels.Label{Name: "instance", Value: "test-instance"}, - 
labels.Label{Name: "job", Value: "test-service"}), + l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 10.0, }, { - l: labels.New( - labels.Label{Name: "__name__", Value: "target_info"}, - labels.Label{Name: "host.name", Value: "test-host"}, - labels.Label{Name: "instance", Value: "test-instance"}, - labels.Label{Name: "job", Value: "test-service"}, - ), + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), t: timestamp.UnixMilli(), v: 1, }, }, + expectedMetadata: []mockMetadata{ + // All get _bytes unit suffix and counter also gets _total. + { + l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: 
"test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: 
labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"}, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), + m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, + }, + }, }, } { t.Run(testCase.name, func(t *testing.T) { - appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, testCase.typeAndUnitLabels) + otlpOpts := OTLPOptions{ + EnableTypeAndUnitLabels: testCase.typeAndUnitLabels, + } + appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts) for _, sample := range testCase.expectedSamples { requireContainsSample(t, appendable.samples, sample) } + for _, meta := range testCase.expectedMetadata { + requireContainsMetadata(t, appendable.metadata, meta) + } require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count) require.Len(t, appendable.histograms, 1) // 1 (exponential histogram) + require.Len(t, appendable.metadata, 13) // for each float and histogram sample require.Len(t, appendable.exemplars, 1) // 1 (exemplar) }) } } +// Check that start time is ingested if ingestCTZeroSample is enabled +// and the start time is actually set (non-zero). 
+func TestOTLPWriteHandler_StartTime(t *testing.T) { + timestamp := time.Now() + startTime := timestamp.Add(-1 * time.Millisecond) + var zeroTime time.Time + + expectedSamples := []mockSample{ + { + l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + t: timestamp.UnixMilli(), + v: 10.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + t: timestamp.UnixMilli(), + v: 10.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + t: timestamp.UnixMilli(), + v: 30.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + t: timestamp.UnixMilli(), + v: 12.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"), + t: timestamp.UnixMilli(), + v: 2.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"), + t: timestamp.UnixMilli(), + v: 4.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"), + t: timestamp.UnixMilli(), + v: 6.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"), + t: timestamp.UnixMilli(), + v: 8.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"), + t: timestamp.UnixMilli(), + v: 10.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, 
"test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"), + t: timestamp.UnixMilli(), + v: 12.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"), + t: timestamp.UnixMilli(), + v: 12.0, + }, + { + l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"), + t: timestamp.UnixMilli(), + v: 1.0, + }, + } + expectedHistograms := []mockHistogram{ + { + l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"), + t: timestamp.UnixMilli(), + h: &histogram.Histogram{ + Schema: 2, + ZeroThreshold: 1e-128, + ZeroCount: 2, + Count: 10, + Sum: 30, + PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}}, + PositiveBuckets: []int64{2, 0, 0, 0, 0}, + }, + }, + } + + expectedSamplesWithCTZero := make([]mockSample, 0, len(expectedSamples)*2-1) // All samples will get CT zero, except target_info. 
+ for _, s := range expectedSamples { + if s.l.Get(model.MetricNameLabel) != "target_info" { + expectedSamplesWithCTZero = append(expectedSamplesWithCTZero, mockSample{ + l: s.l.Copy(), + t: startTime.UnixMilli(), + v: 0, + }) + } + expectedSamplesWithCTZero = append(expectedSamplesWithCTZero, s) + } + expectedHistogramsWithCTZero := make([]mockHistogram, 0, len(expectedHistograms)*2) + for _, s := range expectedHistograms { + if s.l.Get(model.MetricNameLabel) != "target_info" { + expectedHistogramsWithCTZero = append(expectedHistogramsWithCTZero, mockHistogram{ + l: s.l.Copy(), + t: startTime.UnixMilli(), + h: &histogram.Histogram{}, + }) + } + expectedHistogramsWithCTZero = append(expectedHistogramsWithCTZero, s) + } + + for _, testCase := range []struct { + name string + otlpOpts OTLPOptions + startTime time.Time + expectCTZero bool + expectedSamples []mockSample + expectedHistograms []mockHistogram + }{ + { + name: "IngestCTZero=false/startTime=0", + otlpOpts: OTLPOptions{ + IngestCTZeroSample: false, + }, + startTime: zeroTime, + expectedSamples: expectedSamples, + expectedHistograms: expectedHistograms, + }, + { + name: "IngestCTZero=true/startTime=0", + otlpOpts: OTLPOptions{ + IngestCTZeroSample: true, + }, + startTime: zeroTime, + expectedSamples: expectedSamples, + expectedHistograms: expectedHistograms, + }, + { + name: "IngestCTZero=false/startTime=ts-1ms", + otlpOpts: OTLPOptions{ + IngestCTZeroSample: false, + }, + startTime: startTime, + expectedSamples: expectedSamples, + expectedHistograms: expectedHistograms, + }, + { + name: "IngestCTZero=true/startTime=ts-1ms", + otlpOpts: OTLPOptions{ + IngestCTZeroSample: true, + }, + startTime: startTime, + expectedSamples: expectedSamplesWithCTZero, + expectedHistograms: expectedHistogramsWithCTZero, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + exportRequest := generateOTLPWriteRequest(timestamp, testCase.startTime) + appendable := handleOTLP(t, exportRequest, config.OTLPConfig{ + 
TranslationStrategy: otlptranslator.NoTranslation, + }, testCase.otlpOpts) + for i, expect := range testCase.expectedSamples { + actual := appendable.samples[i] + require.True(t, labels.Equal(expect.l, actual.l), "sample labels,pos=%v", i) + require.Equal(t, expect.t, actual.t, "sample timestamp,pos=%v", i) + require.Equal(t, expect.v, actual.v, "sample value,pos=%v", i) + } + for i, expect := range testCase.expectedHistograms { + actual := appendable.histograms[i] + require.True(t, labels.Equal(expect.l, actual.l), "histogram labels,pos=%v", i) + require.Equal(t, expect.t, actual.t, "histogram timestamp,pos=%v", i) + require.True(t, expect.h.Equals(actual.h), "histogram value,pos=%v", i) + } + require.Len(t, appendable.samples, len(testCase.expectedSamples)) + require.Len(t, appendable.histograms, len(testCase.expectedHistograms)) + }) + } +} + func requireContainsSample(t *testing.T, actual []mockSample, expected mockSample) { t.Helper() @@ -611,7 +1127,20 @@ func requireContainsSample(t *testing.T, actual []mockSample, expected mockSampl "actual : %v", expected, actual)) } -func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, typeAndUnitLabels bool) *mockAppendable { +func requireContainsMetadata(t *testing.T, actual []mockMetadata, expected mockMetadata) { + t.Helper() + + for _, got := range actual { + if labels.Equal(expected.l, got.l) && expected.m.Type == got.m.Type && expected.m.Unit == got.m.Unit && expected.m.Help == got.m.Help { + return + } + } + require.Fail(t, fmt.Sprintf("Metadata not found: \n"+ + "expected: %v\n"+ + "actual : %v", expected, actual)) +} + +func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *mockAppendable { buf, err := exportRequest.MarshalProto() require.NoError(t, err) @@ -619,12 +1148,13 @@ func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg c require.NoError(t, err) 
req.Header.Set("Content-Type", "application/x-protobuf") + log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) appendable := &mockAppendable{} - handler := NewOTLPWriteHandler(nil, nil, appendable, func() config.Config { + handler := NewOTLPWriteHandler(log, nil, appendable, func() config.Config { return config.Config{ OTLPConfig: otlpCfg, } - }, OTLPOptions{EnableTypeAndUnitLabels: typeAndUnitLabels}) + }, otlpOpts) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -634,7 +1164,7 @@ func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg c return appendable } -func generateOTLPWriteRequest(timestamp time.Time) pmetricotlp.ExportRequest { +func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.ExportRequest { d := pmetric.NewMetrics() // Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram @@ -659,6 +1189,7 @@ func generateOTLPWriteRequest(timestamp time.Time) pmetricotlp.ExportRequest { counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty() counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) + counterDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) counterDataPoint.SetDoubleValue(10.0) counterDataPoint.Attributes().PutStr("foo.bar", "baz") @@ -678,6 +1209,7 @@ func generateOTLPWriteRequest(timestamp time.Time) pmetricotlp.ExportRequest { gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty() gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) + gaugeDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) gaugeDataPoint.SetDoubleValue(10.0) gaugeDataPoint.Attributes().PutStr("foo.bar", "baz") @@ -691,9 +1223,10 @@ func generateOTLPWriteRequest(timestamp time.Time) pmetricotlp.ExportRequest { histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty() histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) + 
histogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0}) histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2}) - histogramDataPoint.SetCount(10) + histogramDataPoint.SetCount(12) histogramDataPoint.SetSum(30.0) histogramDataPoint.Attributes().PutStr("foo.bar", "baz") @@ -707,6 +1240,7 @@ func generateOTLPWriteRequest(timestamp time.Time) pmetricotlp.ExportRequest { exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty() exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) + exponentialHistogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) exponentialHistogramDataPoint.SetScale(2.0) exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2}) exponentialHistogramDataPoint.SetZeroCount(2) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 5bdf0f3b26..7ea81e70c6 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -342,6 +342,7 @@ func NewAPI( ConvertDelta: otlpDeltaToCumulative, NativeDelta: otlpNativeDeltaIngestion, LookbackDelta: lookbackDelta, + IngestCTZeroSample: ctZeroIngestionEnabled, EnableTypeAndUnitLabels: enableTypeAndUnitLabels, }) } From acd9aa0afb59fb5542e8dba4c6692e2a8a0df102 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 8 Sep 2025 17:26:41 +0200 Subject: [PATCH 22/89] fix(textparse/protobuf): metric family name corrupted by NHCB parser (#17156) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(textparse): implement NHCB parsing in ProtoBuf parser directly The NHCB conversion does some validation, but we can only return error from Parser.Next() not Parser.Histogram(). So the conversion needs to happen in Next(). There are 2 cases: 1. 
"always_scrape_classic_histograms" is enabled, in which case we convert after returning the classic series. This is to be consistent with the PromParser text parser, which collects NHCB while spitting out classic series; then returns the NHCB. 2. "always_scrape_classic_histograms" is disabled. In which case we never return the classic series. Signed-off-by: György Krajcsovits * refactor(textparse): skip classic series instead of adding NHCB around Do not return the first classic series from the EntryType state, switch to EntrySeries. This means we need to start the histogram field state from -3 , not -2. In EntrySeries, skip classic series if needed. Signed-off-by: György Krajcsovits * reuse nhcb converter Signed-off-by: György Krajcsovits * test(textparse/nhcb): test corrupting metric family name NHCB parse doesn't always copy the metric name from the underlying parser. When called via HELP, UNIT, the string is directly referenced which means that the read-ahead of NHCB can corrupt it. Signed-off-by: György Krajcsovits --- model/labels/labels_common.go | 13 +- model/textparse/benchmark_test.go | 4 +- model/textparse/interface.go | 2 +- model/textparse/nhcbparse.go | 12 +- model/textparse/nhcbparse_test.go | 137 ++- model/textparse/protobufparse.go | 134 ++- model/textparse/protobufparse_test.go | 1177 ++++++++++++++++++++++++- web/federate_test.go | 2 +- 8 files changed, 1391 insertions(+), 90 deletions(-) diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index 8345c12d16..e27da94a47 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -43,6 +43,15 @@ type Label struct { } func (ls Labels) String() string { + return ls.stringImpl(true) +} + +// StringNoSpace is like String but does not add a space after commas. 
+func (ls Labels) StringNoSpace() string { + return ls.stringImpl(false) +} + +func (ls Labels) stringImpl(addSpace bool) string { var bytea [1024]byte // On stack to avoid memory allocation while building the output. b := bytes.NewBuffer(bytea[:0]) @@ -51,7 +60,9 @@ func (ls Labels) String() string { ls.Range(func(l Label) { if i > 0 { b.WriteByte(',') - b.WriteByte(' ') + if addSpace { + b.WriteByte(' ') + } } if !model.LegacyValidation.IsValidLabelName(l.Name) { b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Name)) diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index cd3f332a6d..59ca349e31 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ -149,7 +149,7 @@ func benchParse(b *testing.B, data []byte, parser string) { } case "promproto": newParserFn = func(b []byte, st *labels.SymbolTable) Parser { - return NewProtobufParser(b, true, false, st) + return NewProtobufParser(b, true, false, false, st) } case "omtext": newParserFn = func(b []byte, st *labels.SymbolTable) Parser { @@ -276,7 +276,7 @@ func BenchmarkCreatedTimestampPromProto(b *testing.B) { data := createTestProtoBuf(b).Bytes() st := labels.NewSymbolTable() - p := NewProtobufParser(data, true, false, st) + p := NewProtobufParser(data, true, false, false, st) found := false Inner: diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 2bc2859ee7..c4b0aad5e8 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -142,7 +142,7 @@ func New(b []byte, contentType, fallbackType string, parseClassicHistograms, con o.enableTypeAndUnitLabels = enableTypeAndUnitLabels }) case "application/vnd.google.protobuf": - baseParser = NewProtobufParser(b, parseClassicHistograms, enableTypeAndUnitLabels, st) + return NewProtobufParser(b, parseClassicHistograms, convertClassicHistogramsToNHCB, enableTypeAndUnitLabels, st), err case "text/plain": baseParser = NewPromParser(b, st, enableTypeAndUnitLabels) 
default: diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index e7cfcc028e..d820a0f8b1 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -18,7 +18,6 @@ import ( "io" "math" "strconv" - "strings" "github.com/prometheus/common/model" @@ -373,7 +372,16 @@ func (p *NHCBParser) processNHCB() bool { p.hNHCB = nil p.fhNHCB = fh } - p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",") + + lblsWithMetricName := p.tempLsetNHCB.DropMetricName() + // Ensure we return `metric` instead of `metric{}` for name only + // series, for consistency with wrapped parsers. + if lblsWithMetricName.IsEmpty() { + p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + } else { + p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + lblsWithMetricName.StringNoSpace() + } + p.bytesNHCB = []byte(p.metricStringNHCB) p.lsetNHCB = p.tempLsetNHCB p.swapExemplars() diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index d5d38e4edf..f3f5b9c444 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -15,18 +15,15 @@ package textparse import ( "bytes" - "encoding/binary" "strconv" "testing" - "github.com/gogo/protobuf/proto" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) func TestNHCBParserOnOMParser(t *testing.T) { @@ -182,7 +179,7 @@ foobar{quantile="0.99"} 150.1` m: "hh", typ: model.MetricTypeHistogram, }, { - m: `hh{}`, + m: `hh`, shs: &histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 1, @@ -203,7 +200,7 @@ foobar{quantile="0.99"} 150.1` m: "hhh", typ: model.MetricTypeHistogram, }, { - m: `hhh{}`, + m: `hhh`, shs: 
&histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 1, @@ -333,7 +330,7 @@ foobar{quantile="0.99"} 150.1` m: "baz", typ: model.MetricTypeHistogram, }, { - m: `baz{}`, + m: `baz`, shs: &histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 17, @@ -361,7 +358,7 @@ foobar{quantile="0.99"} 150.1` m: "something", typ: model.MetricTypeHistogram, }, { - m: `something{}`, + m: `something`, shs: &histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 18, @@ -479,7 +476,7 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 m: "something", typ: model.MetricTypeHistogram, }, { - m: `something{}`, + m: `something`, shs: &histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 18, @@ -737,7 +734,7 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) { // Always expect NHCB series after classic. nhcbSeries := []parsedEntry{ { - m: metric + "{}", + m: metric, shs: &histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 175, @@ -893,24 +890,7 @@ metric: < > `} - varintBuf := make([]byte, binary.MaxVarintLen32) - buf := &bytes.Buffer{} - - for _, tmf := range testMetricFamilies { - pb := &dto.MetricFamily{} - // From text to proto message. - require.NoError(t, proto.UnmarshalText(tmf, pb)) - // From proto message to binary protobuf. - protoBuf, err := proto.Marshal(pb) - require.NoError(t, err) - - // Write first length, then binary protobuf. - varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) - buf.Write(varintBuf[:varintLength]) - buf.Write(protoBuf) - } - - return buf + return metricFamiliesToProtobuf(t, testMetricFamilies) } func createTestOpenMetricsHistogram() string { @@ -1054,22 +1034,7 @@ metric: < timestamp_ms: 1234568 >`} - varintBuf := make([]byte, binary.MaxVarintLen32) - buf := &bytes.Buffer{} - - for _, tmf := range testMetricFamilies { - pb := &dto.MetricFamily{} - // From text to proto message. 
- require.NoError(t, proto.UnmarshalText(tmf, pb)) - // From proto message to binary protobuf. - protoBuf, err := proto.Marshal(pb) - require.NoError(t, err) - - // Write first length, then binary protobuf. - varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) - buf.Write(varintBuf[:varintLength]) - buf.Write(protoBuf) - } + buf := metricFamiliesToProtobuf(t, testMetricFamilies) exp := []parsedEntry{ { @@ -1107,7 +1072,7 @@ metric: < typ: model.MetricTypeHistogram, }, { - m: "test_histogram2{}", + m: "test_histogram2", shs: &histogram.Histogram{ Schema: histogram.CustomBucketsSchema, Count: 175, @@ -1128,3 +1093,87 @@ metric: < got := testParse(t, p) requireEntries(t, exp, got) } + +// TestNHCBNotCorruptMetricNameAfterRead is a regression test for https://github.com/prometheus/prometheus/issues/17075. +func TestNHCBNotCorruptMetricNameAfterRead(t *testing.T) { + inputOM := `# HELP test_histogram_seconds Just a test histogram +# TYPE test_histogram_seconds histogram +test_histogram_seconds_count 10 +test_histogram_seconds_sum 100 +test_histogram_seconds_bucket{le="10"} 10 +test_histogram_seconds_bucket{le="+Inf"} 10 +# HELP different_metric Just a different metric +# TYPE different_metric histogram +different_metric_count 5 +different_metric_sum 50 +different_metric_bucket{le="10"} 5 +different_metric_bucket{le="+Inf"} 5 +# EOF` + + testMetricFamilies := []string{`name: "test_histogram_seconds" +help: "Just a test histogram" +type: HISTOGRAM +metric: < + histogram: < + sample_count: 10 + sample_sum: 100 + bucket: < + cumulative_count: 10 + upper_bound: 10 + > + > +>`, `name: "different_metric" +help: "Just a different metric" +type: HISTOGRAM +metric: < + histogram: < + sample_count: 5 + sample_sum: 50 + bucket: < + cumulative_count: 5 + upper_bound: 10 + > + > +>`} + + buf := metricFamiliesToProtobuf(t, testMetricFamilies) + + testCases := []struct { + input []byte + typ string + }{ + {input: buf.Bytes(), typ: "application/vnd.google.protobuf"}, + 
{input: []byte(inputOM), typ: "text/plain"}, + {input: []byte(inputOM), typ: "application/openmetrics-text"}, + } + + for _, tc := range testCases { + t.Run(tc.typ, func(t *testing.T) { + p, err := New(tc.input, tc.typ, "", false, true, false, false, labels.NewSymbolTable()) + require.NoError(t, err) + require.NotNil(t, p) + + getNext := func() Entry { + e, err := p.Next() + require.NoError(t, err) + return e + } + + require.Equal(t, EntryHelp, getNext()) + lastMFName, lastHelp := p.Help() + require.Equal(t, "test_histogram_seconds", string(lastMFName)) + require.Equal(t, "Just a test histogram", string(lastHelp)) + + require.Equal(t, EntryType, getNext()) + var lastType model.MetricType + lastMFName, lastType = p.Type() + require.Equal(t, "test_histogram_seconds", string(lastMFName)) + require.Equal(t, model.MetricTypeHistogram, lastType) + + require.Equal(t, EntryHistogram, getNext()) + _, _, h, _ := p.Histogram() + require.NotNil(t, h) + require.Equal(t, "test_histogram_seconds", string(lastMFName)) + }) + } +} diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index e7ce710491..1b64a4d490 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/prometheus/model/labels" dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" "github.com/prometheus/prometheus/schema" + "github.com/prometheus/prometheus/util/convertnhcb" ) // floatFormatBufPool is exclusively used in formatOpenMetricsFloat. @@ -78,20 +79,31 @@ type ProtobufParser struct { // Whether to also parse a classic histogram that is also present as a // native histogram. - parseClassicHistograms bool + parseClassicHistograms bool + // Whether to add type and unit labels. enableTypeAndUnitLabels bool + + // Whether to convert classic histograms to native histograms with custom buckets. + convertClassicHistogramsToNHCB bool + // Reusable classic to NHCB converter. 
+ tmpNHCB convertnhcb.TempHistogram + // We need to preload NHCB since we cannot do error handling in Histogram(). + nhcbH *histogram.Histogram + nhcbFH *histogram.FloatHistogram } // NewProtobufParser returns a parser for the payload in the byte slice. -func NewProtobufParser(b []byte, parseClassicHistograms, enableTypeAndUnitLabels bool, st *labels.SymbolTable) Parser { +func NewProtobufParser(b []byte, parseClassicHistograms, convertClassicHistogramsToNHCB, enableTypeAndUnitLabels bool, st *labels.SymbolTable) Parser { return &ProtobufParser{ dec: dto.NewMetricStreamingDecoder(b), entryBytes: &bytes.Buffer{}, builder: labels.NewScratchBuilderWithSymbolTable(st, 16), // TODO(bwplotka): Try base builder. - state: EntryInvalid, - parseClassicHistograms: parseClassicHistograms, - enableTypeAndUnitLabels: enableTypeAndUnitLabels, + state: EntryInvalid, + parseClassicHistograms: parseClassicHistograms, + enableTypeAndUnitLabels: enableTypeAndUnitLabels, + convertClassicHistogramsToNHCB: convertClassicHistogramsToNHCB, + tmpNHCB: convertnhcb.NewTempHistogram(), } } @@ -182,6 +194,15 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his h = p.dec.GetHistogram() ) + if !isNativeHistogram(h) { + // This only happens if we have a classic histogram and + // we converted it to NHCB already in Next. + if *ts != 0 { + return p.entryBytes.Bytes(), ts, p.nhcbH, p.nhcbFH + } + return p.entryBytes.Bytes(), nil, p.nhcbH, p.nhcbFH + } + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { p.redoClassic = true } @@ -406,6 +427,8 @@ func (p *ProtobufParser) CreatedTimestamp() int64 { // read. func (p *ProtobufParser) Next() (Entry, error) { p.exemplarReturned = false + p.nhcbH = nil + p.nhcbFH = nil switch p.state { // Invalid state occurs on: // * First Next() call. 
@@ -468,8 +491,12 @@ func (p *ProtobufParser) Next() (Entry, error) { p.state = EntryType case EntryType: t := p.dec.GetType() - if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && - isNativeHistogram(p.dec.GetHistogram()) { + if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { + if !isNativeHistogram(p.dec.GetHistogram()) { + p.state = EntrySeries + p.fieldPos = -3 // We have not returned anything, let p.Next() increment it to -2. + return p.Next() + } p.state = EntryHistogram } else { p.state = EntrySeries @@ -480,14 +507,18 @@ func (p *ProtobufParser) Next() (Entry, error) { case EntrySeries: // Potentially a second series in the metric family. t := p.dec.GetType() + decodeNext := true if t == dto.MetricType_SUMMARY || t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { // Non-trivial series (complex metrics, with magic suffixes). + isClassicHistogram := (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && !isNativeHistogram(p.dec.GetHistogram()) + skipSeries := p.convertClassicHistogramsToNHCB && isClassicHistogram && !p.parseClassicHistograms + // Did we iterate over all the classic representations fields? // NOTE: p.fieldsDone is updated on p.onSeriesOrHistogramUpdate. - if !p.fieldsDone { + if !p.fieldsDone && !skipSeries { // Still some fields to iterate over. p.fieldPos++ if err := p.onSeriesOrHistogramUpdate(); err != nil { @@ -504,25 +535,39 @@ func (p *ProtobufParser) Next() (Entry, error) { // If this is a metric family containing native // histograms, it means we are here thanks to redoClassic state. // Return to native histograms for the consistent flow. - if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && - isNativeHistogram(p.dec.GetHistogram()) { - p.state = EntryHistogram + // If this is a metric family containing classic histograms, + // it means we might need to do NHCB conversion. 
+ if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { + if !isClassicHistogram { + p.state = EntryHistogram + } else if p.convertClassicHistogramsToNHCB { + // We still need to spit out the NHCB. + var err error + p.nhcbH, p.nhcbFH, err = p.convertToNHCB(t) + if err != nil { + return EntryInvalid, err + } + p.state = EntryHistogram + // We have an NHCB to emit, no need to decode the next series. + decodeNext = false + } } } // Is there another series? - if err := p.dec.NextMetric(); err != nil { - if errors.Is(err, io.EOF) { - p.state = EntryInvalid - return p.Next() + if decodeNext { + if err := p.dec.NextMetric(); err != nil { + if errors.Is(err, io.EOF) { + p.state = EntryInvalid + return p.Next() + } + return EntryInvalid, err } - return EntryInvalid, err } if err := p.onSeriesOrHistogramUpdate(); err != nil { return EntryInvalid, err } case EntryHistogram: - // Was Histogram() called and parseClassicHistograms is true? - if p.redoClassic { + switchToClassic := func() (Entry, error) { p.redoClassic = false p.fieldPos = -3 p.fieldsDone = false @@ -530,6 +575,11 @@ func (p *ProtobufParser) Next() (Entry, error) { return p.Next() // Switch to classic histogram. } + // Was Histogram() called and parseClassicHistograms is true? + if p.redoClassic { + return switchToClassic() + } + // Is there another series? if err := p.dec.NextMetric(); err != nil { if errors.Is(err, io.EOF) { @@ -538,6 +588,14 @@ func (p *ProtobufParser) Next() (Entry, error) { } return EntryInvalid, err } + + // If this is a metric family does not contain native + // histograms, it means we are here thanks to NHCB conversion. + // Return to classic histograms for the consistent flow. 
+ if !isNativeHistogram(p.dec.GetHistogram()) { + return switchToClassic() + } + if err := p.onSeriesOrHistogramUpdate(); err != nil { return EntryInvalid, err } @@ -690,3 +748,43 @@ func isNativeHistogram(h *dto.Histogram) bool { h.GetZeroThreshold() > 0 || h.GetZeroCount() > 0 } + +func (p *ProtobufParser) convertToNHCB(t dto.MetricType) (*histogram.Histogram, *histogram.FloatHistogram, error) { + h := p.dec.GetHistogram() + p.tmpNHCB.Reset() + // TODO(krajorama): convertnhcb should support setting integer mode up + // front since we know it here. That would avoid the converter having + // to guess it based on counts. + v := h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } + if err := p.tmpNHCB.SetCount(v); err != nil { + return nil, nil, err + } + + if err := p.tmpNHCB.SetSum(h.GetSampleSum()); err != nil { + return nil, nil, err + } + for _, b := range h.GetBucket() { + v := b.GetCumulativeCountFloat() + if v == 0 { + v = float64(b.GetCumulativeCount()) + } + if err := p.tmpNHCB.SetBucketCount(b.GetUpperBound(), v); err != nil { + return nil, nil, err + } + } + ch, cfh, err := p.tmpNHCB.Convert() + if err != nil { + return nil, nil, err + } + if t == dto.MetricType_GAUGE_HISTOGRAM { + if ch != nil { + ch.CounterResetHint = histogram.GaugeType + } else { + cfh.CounterResetHint = histogram.GaugeType + } + } + return ch, cfh, nil +} diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 35a4238fdb..7a7eb6eec7 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -16,6 +16,7 @@ package textparse import ( "bytes" "encoding/binary" + "fmt" "testing" "github.com/gogo/protobuf/proto" @@ -28,6 +29,26 @@ import ( dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) +func metricFamiliesToProtobuf(t testing.TB, testMetricFamilies []string) *bytes.Buffer { + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := &bytes.Buffer{} + + for _, tmf 
:= range testMetricFamilies { + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(tmf, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. + varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + return buf +} + func createTestProtoBuf(t testing.TB) *bytes.Buffer { t.Helper() @@ -803,24 +824,7 @@ metric: < `, } - varintBuf := make([]byte, binary.MaxVarintLen32) - buf := &bytes.Buffer{} - - for _, tmf := range testMetricFamilies { - pb := &dto.MetricFamily{} - // From text to proto message. - require.NoError(t, proto.UnmarshalText(tmf, pb)) - // From proto message to binary protobuf. - protoBuf, err := proto.Marshal(pb) - require.NoError(t, err) - - // Write first length, then binary protobuf. - varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) - buf.Write(varintBuf[:varintLength]) - buf.Write(protoBuf) - } - - return buf + return metricFamiliesToProtobuf(t, testMetricFamilies) } func TestProtobufParse(t *testing.T) { @@ -833,7 +837,7 @@ func TestProtobufParse(t *testing.T) { }{ { name: "parseClassicHistograms=false/enableTypeAndUnitLabels=false", - parser: NewProtobufParser(inputBuf.Bytes(), false, false, labels.NewSymbolTable()), + parser: NewProtobufParser(inputBuf.Bytes(), false, false, false, labels.NewSymbolTable()), expected: []parsedEntry{ { m: "go_build_info", @@ -1468,7 +1472,7 @@ func TestProtobufParse(t *testing.T) { }, { name: "parseClassicHistograms=false/enableTypeAndUnitLabels=true", - parser: NewProtobufParser(inputBuf.Bytes(), false, true, labels.NewSymbolTable()), + parser: NewProtobufParser(inputBuf.Bytes(), false, false, true, labels.NewSymbolTable()), expected: []parsedEntry{ { m: "go_build_info", @@ -2140,7 +2144,7 @@ func TestProtobufParse(t *testing.T) { }, { name: 
"parseClassicHistograms=true/enableTypeAndUnitLabels=false", - parser: NewProtobufParser(inputBuf.Bytes(), true, false, labels.NewSymbolTable()), + parser: NewProtobufParser(inputBuf.Bytes(), true, false, false, labels.NewSymbolTable()), expected: []parsedEntry{ { m: "go_build_info", @@ -3214,3 +3218,1134 @@ func TestProtobufParse(t *testing.T) { }) } } + +// TestProtobufParseWithNHCB is only concerned with classic histograms. +func TestProtobufParseWithNHCB(t *testing.T) { + testMetricFamilies := []string{ + `name: "test_histogram1" +help: "Similar histogram as before but now without sparse buckets." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.000828 + bucket: < + cumulative_count: 2 + upper_bound: -0.00048 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.00038 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00038 + timestamp: < + seconds: 1625851153 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: 1 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.000295 + > + > + schema: 0 + zero_threshold: 0 + > +> + +`, + `name: "test_histogram2_seconds" +help: "Similar histogram as before but now with integer buckets." +type: HISTOGRAM +unit: "seconds" +metric: < + histogram: < + sample_count: 6 + sample_sum: 50 + bucket: < + cumulative_count: 2 + upper_bound: -20 + > + bucket: < + cumulative_count: 4 + upper_bound: 20 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: 15 + timestamp: < + seconds: 1625851153 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 6 + upper_bound: 30 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: 25 + timestamp: < + seconds: 1625851153 + nanos: 146848499 + > + > + > + schema: 0 + zero_threshold: 0 + > +> + +`, + `name: "test_histogram_family" +help: "Test histogram metric family with two very simple histograms." 
+type: HISTOGRAM +metric: < + label: < + name: "foo" + value: "bar" + > + histogram: < + sample_count: 5 + sample_sum: 12.1 + bucket: < + cumulative_count: 2 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 3 + upper_bound: 2.2 + > + > +> +metric: < + label: < + name: "foo" + value: "baz" + > + histogram: < + sample_count: 6 + sample_sum: 13.1 + bucket: < + cumulative_count: 0 + upper_bound: 1.1 + > + bucket: < + cumulative_count: 5 + upper_bound: 2.2 + > + > +> + +`, + `name: "empty_histogram" +help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram." +type: HISTOGRAM +metric: < + histogram: < + positive_span: < + offset: 0 + length: 0 + > + > +> + +`, + } + + buf := metricFamiliesToProtobuf(t, testMetricFamilies) + + data := buf.Bytes() + + testCases := []struct { + keepClassic bool + typeAndUnit bool + expected []parsedEntry + }{ + { + keepClassic: false, + typeAndUnit: false, + expected: []parsedEntry{ + { + m: "test_histogram1", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram1", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram1", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 175, + Sum: 0.000828, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 4}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.00048, -0.00038, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram1", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + // The second exemplar has no timestamp. 
+ }, + }, + { + m: "test_histogram2_seconds", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram2_seconds", + unit: "seconds", + }, + { + m: "test_histogram2_seconds", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2_seconds", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 50, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 0}, + CustomValues: []float64{-20, 20, 30}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, -1, 1}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{5, -4}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { + m: "empty_histogram", + help: "A histogram without 
observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "empty_histogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + ), + }, + }, + }, + { + keepClassic: true, + typeAndUnit: false, + expected: []parsedEntry{ + { + m: "test_histogram1", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram1", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram1_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram1_count", + ), + }, + { + m: "test_histogram1_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram1_sum", + ), + }, + { + m: "test_histogram1_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "le", "-0.00048", + ), + }, + { + m: "test_histogram1_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "le", "-0.00038", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram1_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "le", "1.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { + m: "test_histogram1_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "le", "+Inf", + ), + }, + { + m: "test_histogram1", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 175, + Sum: 0.000828, + Schema: -53, + PositiveSpans: []histogram.Span{ + 
{Length: 4}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.00048, -0.00038, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram1", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + // The second exemplar has no timestamp. + }, + }, + { + m: "test_histogram2_seconds", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram2_seconds", + unit: "seconds", + }, + { + m: "test_histogram2_seconds", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2_seconds_count", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_count", + ), + }, + { + m: "test_histogram2_seconds_sum", + v: 50, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_sum", + ), + }, + { + m: "test_histogram2_seconds_bucket\xffle\xff-20.0", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "le", "-20.0", + ), + }, + { + m: "test_histogram2_seconds_bucket\xffle\xff20.0", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "le", "20.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_seconds_bucket\xffle\xff30.0", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "le", "30.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_seconds_bucket\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "le", "+Inf", + ), + }, + { + m: "test_histogram2_seconds", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 50, + Schema: -53, + 
PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 0}, + CustomValues: []float64{-20, 20, 30}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_family_count\xfffoo\xffbar", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "bar", + ), + }, + { + m: "test_histogram_family_sum\xfffoo\xffbar", + v: 12.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "bar", + ), + }, + { + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "1.1", + ), + }, + { + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", + v: 3, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "2.2", + ), + }, + { + m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "bar", + "le", "+Inf", + ), + }, + { + m: "test_histogram_family\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, -1, 1}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "bar", + ), + }, + { + m: 
"test_histogram_family_count\xfffoo\xffbaz", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "foo", "baz", + ), + }, + { + m: "test_histogram_family_sum\xfffoo\xffbaz", + v: 13.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "foo", "baz", + ), + }, + { + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", + v: 0, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "1.1", + ), + }, + { + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "2.2", + ), + }, + { + m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "foo", "baz", + "le", "+Inf", + ), + }, + { + m: "test_histogram_family\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{5, -4}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "foo", "baz", + ), + }, + { + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "empty_histogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + ), + }, + }, + }, + { + keepClassic: false, + typeAndUnit: true, + expected: []parsedEntry{ + { + m: "test_histogram1", + help: "Similar histogram as before but now without sparse 
buckets.", + }, + { + m: "test_histogram1", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram1\xff__type__\xffhistogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 175, + Sum: 0.000828, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 4}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.00048, -0.00038, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram1", + "__type__", "histogram", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + // The second exemplar has no timestamp. + }, + }, + { + m: "test_histogram2_seconds", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram2_seconds", + unit: "seconds", + }, + { + m: "test_histogram2_seconds", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2_seconds\xff__type__\xffhistogram\xff__unit__\xffseconds", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 50, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 0}, + CustomValues: []float64{-20, 20, 30}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds", + "__type__", "histogram", + "__unit__", "seconds", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbar", + shs: &histogram.Histogram{ + 
CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, -1, 1}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "__type__", "histogram", + "foo", "bar", + ), + }, + { + m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{5, -4}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "__type__", "histogram", + "foo", "baz", + ), + }, + { + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "empty_histogram\xff__type__\xffhistogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + "__type__", "histogram", + ), + }, + }, + }, + { + keepClassic: true, + typeAndUnit: true, + expected: []parsedEntry{ + { + m: "test_histogram1", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram1", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram1_count\xff__type__\xffhistogram", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram1_count", + "__type__", "histogram", + ), + }, + { + m: "test_histogram1_sum\xff__type__\xffhistogram", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram1_sum", + "__type__", 
"histogram", + ), + }, + { + m: "test_histogram1_bucket\xff__type__\xffhistogram\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "__type__", "histogram", + "le", "-0.00048", + ), + }, + { + m: "test_histogram1_bucket\xff__type__\xffhistogram\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "__type__", "histogram", + "le", "-0.00038", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram1_bucket\xff__type__\xffhistogram\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "__type__", "histogram", + "le", "1.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { + m: "test_histogram1_bucket\xff__type__\xffhistogram\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram1_bucket", + "__type__", "histogram", + "le", "+Inf", + ), + }, + { + m: "test_histogram1\xff__type__\xffhistogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 175, + Sum: 0.000828, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 4}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.00048, -0.00038, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram1", + "__type__", "histogram", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + // The second exemplar has no timestamp. 
+ }, + }, + { + m: "test_histogram2_seconds", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram2_seconds", + unit: "seconds", + }, + { + m: "test_histogram2_seconds", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2_seconds_count\xff__type__\xffhistogram\xff__unit__\xffseconds", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_count", + "__type__", "histogram", + "__unit__", "seconds", + ), + }, + { + m: "test_histogram2_seconds_sum\xff__type__\xffhistogram\xff__unit__\xffseconds", + v: 50, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_sum", + "__type__", "histogram", + "__unit__", "seconds", + ), + }, + { + m: "test_histogram2_seconds_bucket\xff__type__\xffhistogram\xff__unit__\xffseconds\xffle\xff-20.0", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "__type__", "histogram", + "__unit__", "seconds", + "le", "-20.0", + ), + }, + { + m: "test_histogram2_seconds_bucket\xff__type__\xffhistogram\xff__unit__\xffseconds\xffle\xff20.0", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "__type__", "histogram", + "__unit__", "seconds", + "le", "20.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_seconds_bucket\xff__type__\xffhistogram\xff__unit__\xffseconds\xffle\xff30.0", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "__type__", "histogram", + "__unit__", "seconds", + "le", "30.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_seconds_bucket\xff__type__\xffhistogram\xff__unit__\xffseconds\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds_bucket", + "__type__", "histogram", + 
"__unit__", "seconds", + "le", "+Inf", + ), + }, + { + m: "test_histogram2_seconds\xff__type__\xffhistogram\xff__unit__\xffseconds", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 50, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 0, 0}, + CustomValues: []float64{-20, 20, 30}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram2_seconds", + "__type__", "histogram", + "__unit__", "seconds", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_family_count\xff__type__\xffhistogram\xfffoo\xffbar", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "__type__", "histogram", + "foo", "bar", + ), + }, + { + m: "test_histogram_family_sum\xff__type__\xffhistogram\xfffoo\xffbar", + v: 12.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "__type__", "histogram", + "foo", "bar", + ), + }, + { + m: "test_histogram_family_bucket\xff__type__\xffhistogram\xfffoo\xffbar\xffle\xff1.1", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "__type__", "histogram", + "foo", "bar", + "le", "1.1", + ), + }, + { + m: "test_histogram_family_bucket\xff__type__\xffhistogram\xfffoo\xffbar\xffle\xff2.2", + v: 3, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "__type__", "histogram", + "foo", "bar", + "le", "2.2", + ), + }, + { + m: "test_histogram_family_bucket\xff__type__\xffhistogram\xfffoo\xffbar\xffle\xff+Inf", + v: 5, + lset: 
labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "__type__", "histogram", + "foo", "bar", + "le", "+Inf", + ), + }, + { + m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Length: 3}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, -1, 1}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "__type__", "histogram", + "foo", "bar", + ), + }, + { + m: "test_histogram_family_count\xff__type__\xffhistogram\xfffoo\xffbaz", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_count", + "__type__", "histogram", + "foo", "baz", + ), + }, + { + m: "test_histogram_family_sum\xff__type__\xffhistogram\xfffoo\xffbaz", + v: 13.1, + lset: labels.FromStrings( + "__name__", "test_histogram_family_sum", + "__type__", "histogram", + "foo", "baz", + ), + }, + { + m: "test_histogram_family_bucket\xff__type__\xffhistogram\xfffoo\xffbaz\xffle\xff1.1", + v: 0, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "__type__", "histogram", + "foo", "baz", + "le", "1.1", + ), + }, + { + m: "test_histogram_family_bucket\xff__type__\xffhistogram\xfffoo\xffbaz\xffle\xff2.2", + v: 5, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "__type__", "histogram", + "foo", "baz", + "le", "2.2", + ), + }, + { + m: "test_histogram_family_bucket\xff__type__\xffhistogram\xfffoo\xffbaz\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram_family_bucket", + "__type__", "histogram", + "foo", "baz", + "le", "+Inf", + ), + }, + { + m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: -53, + PositiveSpans: 
[]histogram.Span{ + {Offset: 1, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{5, -4}, + CustomValues: []float64{1.1, 2.2}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "__type__", "histogram", + "foo", "baz", + ), + }, + { + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "empty_histogram\xff__type__\xffhistogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + "__type__", "histogram", + ), + }, + }, + }, + } + + for _, tc := range testCases { + name := fmt.Sprintf("keepClassic=%v,typeAndUnit=%v", tc.keepClassic, tc.typeAndUnit) + t.Run(name, func(t *testing.T) { + p := NewProtobufParser(data, tc.keepClassic, true, tc.typeAndUnit, labels.NewSymbolTable()) + got := testParse(t, p) + requireEntries(t, tc.expected, got) + }) + } +} diff --git a/web/federate_test.go b/web/federate_test.go index 97aa45edba..e0845d7cd4 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -392,7 +392,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { require.Equal(t, http.StatusOK, res.Code) body, err := io.ReadAll(res.Body) require.NoError(t, err) - p := textparse.NewProtobufParser(body, false, false, labels.NewSymbolTable()) + p := textparse.NewProtobufParser(body, false, false, false, labels.NewSymbolTable()) var actVec promql.Vector metricFamilies := 0 l := labels.Labels{} From 0fc2547740105199afbbbf4e13c657c95bd37e46 Mon Sep 17 00:00:00 2001 From: Minh Nguyen <148210689+pipiland2612@users.noreply.github.com> Date: Mon, 8 Sep 2025 23:04:55 +0300 Subject: [PATCH 23/89] Handle error gracefully for the `desymbolizeLabels` function in 
prompb/io/prometheus/write/v2/symbols.go (#17160) Signed-off-by: pipiland --------- Signed-off-by: pipiland Co-authored-by: pipiland --- prompb/io/prometheus/write/v2/codec.go | 13 +++++++---- prompb/io/prometheus/write/v2/symbols.go | 23 +++++++++++++++---- prompb/io/prometheus/write/v2/symbols_test.go | 3 ++- prompb/rwcommon/codec_test.go | 4 +++- storage/remote/queue_manager_test.go | 17 +++++++++++--- storage/remote/write_handler.go | 13 +++++++++-- storage/remote/write_handler_test.go | 23 +++++++++++++++++-- 7 files changed, 79 insertions(+), 17 deletions(-) diff --git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go index 8f119d6d01..71196edb88 100644 --- a/prompb/io/prometheus/write/v2/codec.go +++ b/prompb/io/prometheus/write/v2/codec.go @@ -25,7 +25,7 @@ import ( // NOTE(bwplotka): This file's code is tested in /prompb/rwcommon. // ToLabels return model labels.Labels from timeseries' remote labels. -func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels { +func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) (labels.Labels, error) { return desymbolizeLabels(b, m.GetLabelsRefs(), symbols) } @@ -207,13 +207,18 @@ func spansToSpansProto(s []histogram.Span) []BucketSpan { return spans } -func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar { +func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) (exemplar.Exemplar, error) { timestamp := m.Timestamp + lbls, err := desymbolizeLabels(b, m.LabelsRefs, symbols) + if err != nil { + return exemplar.Exemplar{}, err + } + return exemplar.Exemplar{ - Labels: desymbolizeLabels(b, m.LabelsRefs, symbols), + Labels: lbls, Value: m.Value, Ts: timestamp, HasTs: timestamp != 0, - } + }, nil } diff --git a/prompb/io/prometheus/write/v2/symbols.go b/prompb/io/prometheus/write/v2/symbols.go index f316a976f2..7c7feca239 100644 --- a/prompb/io/prometheus/write/v2/symbols.go +++ 
b/prompb/io/prometheus/write/v2/symbols.go @@ -13,7 +13,11 @@ package writev2 -import "github.com/prometheus/prometheus/model/labels" +import ( + "fmt" + + "github.com/prometheus/prometheus/model/labels" +) // SymbolsTable implements table for easy symbol use. type SymbolsTable struct { @@ -73,11 +77,22 @@ func (t *SymbolsTable) Reset() { } // desymbolizeLabels decodes label references, with given symbols to labels. -func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels { +// This function requires labelRefs to have an even number of elements (name-value pairs) and +// all references must be valid indices within the symbols table. It will return an error if +// these invariants are violated. +func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) (labels.Labels, error) { + if len(labelRefs)%2 != 0 { + return labels.EmptyLabels(), fmt.Errorf("invalid labelRefs length %d", len(labelRefs)) + } + b.Reset() for i := 0; i < len(labelRefs); i += 2 { - b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]]) + nameRef, valueRef := labelRefs[i], labelRefs[i+1] + if int(nameRef) >= len(symbols) || int(valueRef) >= len(symbols) { + return labels.EmptyLabels(), fmt.Errorf("labelRefs %d (name) = %d (value) outside of symbols table (size %d)", nameRef, valueRef, len(symbols)) + } + b.Add(symbols[nameRef], symbols[valueRef]) } b.Sort() - return b.Labels() + return b.Labels(), nil } diff --git a/prompb/io/prometheus/write/v2/symbols_test.go b/prompb/io/prometheus/write/v2/symbols_test.go index 3d852e88f1..7e7c7cb0bd 100644 --- a/prompb/io/prometheus/write/v2/symbols_test.go +++ b/prompb/io/prometheus/write/v2/symbols_test.go @@ -50,7 +50,8 @@ func TestSymbolsTable(t *testing.T) { encoded := s.SymbolizeLabels(ls, nil) require.Equal(t, []uint32{1, 3, 4, 5}, encoded) b := labels.NewScratchBuilder(len(encoded)) - decoded := desymbolizeLabels(&b, encoded, s.Symbols()) + decoded, err := desymbolizeLabels(&b, 
encoded, s.Symbols()) + require.NoError(t, err) require.Equal(t, ls, decoded) // Different buf. diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go index b91355c51c..73a8196fa8 100644 --- a/prompb/rwcommon/codec_test.go +++ b/prompb/rwcommon/codec_test.go @@ -40,7 +40,9 @@ func TestToLabels(t *testing.T) { v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"} ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}} b := labels.NewScratchBuilder(2) - require.Equal(t, expected, ts.ToLabels(&b, v2Symbols)) + result, err := ts.ToLabels(&b, v2Symbols) + require.NoError(t, err) + require.Equal(t, expected, result) // No need for FromLabels in our prod code as we use symbol table to do so. }) } diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index d117349a86..7a051656d5 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -1245,7 +1245,11 @@ func v2RequestToWriteRequest(v2Req *writev2.Request) (*prompb.WriteRequest, erro } b := labels.NewScratchBuilder(0) for i, rts := range v2Req.Timeseries { - rts.ToLabels(&b, v2Req.Symbols).Range(func(l labels.Label) { + lbls, err := rts.ToLabels(&b, v2Req.Symbols) + if err != nil { + return nil, fmt.Errorf("failed to convert labels: %w", err) + } + lbls.Range(func(l labels.Label) { req.Timeseries[i].Labels = append(req.Timeseries[i].Labels, prompb.Label{ Name: l.Name, Value: l.Value, @@ -1256,7 +1260,11 @@ func v2RequestToWriteRequest(v2Req *writev2.Request) (*prompb.WriteRequest, erro for j, e := range rts.Exemplars { exemplars[j].Value = e.Value exemplars[j].Timestamp = e.Timestamp - e.ToExemplar(&b, v2Req.Symbols).Labels.Range(func(l labels.Label) { + ex, err := e.ToExemplar(&b, v2Req.Symbols) + if err != nil { + return nil, fmt.Errorf("failed to convert exemplar: %w", err) + } + ex.Labels.Range(func(l labels.Label) { exemplars[j].Labels = append(exemplars[j].Labels, prompb.Label{ Name: l.Name, Value: l.Value, @@ 
-1282,7 +1290,10 @@ func v2RequestToWriteRequest(v2Req *writev2.Request) (*prompb.WriteRequest, erro // Convert v2 metadata to v1 format. if rts.Metadata.Type != writev2.Metadata_METRIC_TYPE_UNSPECIFIED { - labels := rts.ToLabels(&b, v2Req.Symbols) + labels, err := rts.ToLabels(&b, v2Req.Symbols) + if err != nil { + return nil, fmt.Errorf("failed to convert metadata labels: %w", err) + } metadata := rts.ToMetadata(v2Req.Symbols) metricFamilyName := labels.String() diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index ce4c569715..7681655e61 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -388,7 +388,12 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * b = labels.NewScratchBuilder(0) ) for _, ts := range req.Timeseries { - ls := ts.ToLabels(&b, req.Symbols) + ls, err := ts.ToLabels(&b, req.Symbols) + if err != nil { + badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing labels for series %v: %w", ts.LabelsRefs, err)) + samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) + continue + } // Validate series labels early. // NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose // specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case. @@ -474,7 +479,11 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Exemplars. 
for _, ep := range ts.Exemplars { - e := ep.ToExemplar(&b, req.Symbols) + e, err := ep.ToExemplar(&b, req.Symbols) + if err != nil { + badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing exemplar for series %v: %w", ls.String(), err)) + continue + } ref, err = app.AppendExemplar(ref, ls, e) if err == nil { rs.Exemplars++ diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index f50106b3d4..5631e80732 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -402,6 +402,22 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { expectedCode: http.StatusBadRequest, expectedRespBody: "invalid labels for series, labels {__name__=\"test_metric1\", test_metric1=\"test_metric1\", test_metric1=\"test_metric1\"}, duplicated label test_metric1\n", }, + { + desc: "Partial write; first series with odd number of label refs", + input: append( + []writev2.TimeSeries{{LabelsRefs: []uint32{1, 2, 3}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, + writeV2RequestFixture.Timeseries...), + expectedCode: http.StatusBadRequest, + expectedRespBody: "parsing labels for series [1 2 3]: invalid labelRefs length 3\n", + }, + { + desc: "Partial write; first series with out-of-bounds symbol references", + input: append( + []writev2.TimeSeries{{LabelsRefs: []uint32{1, 999}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, + writeV2RequestFixture.Timeseries...), + expectedCode: http.StatusBadRequest, + expectedRespBody: "parsing labels for series [1 999]: labelRefs 1 (name) = 999 (value) outside of symbols table (size 18)\n", + }, { desc: "Partial write; first series with one OOO sample", input: func() []writev2.TimeSeries { @@ -543,7 +559,8 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { for _, ts := range writeV2RequestFixture.Timeseries { zeroHistogramIngested := false zeroFloatHistogramIngested := false - ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols) + ls, err := ts.ToLabels(&b, 
writeV2RequestFixture.Symbols) + require.NoError(t, err) for _, s := range ts.Samples { if ts.CreatedTimestamp != 0 && tc.ingestCTZeroSample { @@ -579,7 +596,9 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { } if tc.appendExemplarErr == nil { for _, e := range ts.Exemplars { - exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels + ex, err := e.ToExemplar(&b, writeV2RequestFixture.Symbols) + require.NoError(t, err) + exemplarLabels := ex.Labels requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) j++ } From 48c6c1a692a1e2e9a989e872542acc074348ff5a Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Sep 2025 19:05:14 +0200 Subject: [PATCH 24/89] promql: Make HistogramStatsIterator.AtFloatHistogram idempotent Previously, multiple calls returned a wrong counter reset hint. This commit also includes a bunch of refactorings that partially have value on their own. However, the need for them was triggered by the additional work needed for idempotency, so I included them in this commit. Signed-off-by: beorn7 --- promql/histogram_stats_iterator.go | 92 +++++++++++++++---------- promql/histogram_stats_iterator_test.go | 15 +++- 2 files changed, 69 insertions(+), 38 deletions(-) diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go index 408b3cfa6d..e58cc7d848 100644 --- a/promql/histogram_stats_iterator.go +++ b/promql/histogram_stats_iterator.go @@ -29,15 +29,16 @@ import ( type HistogramStatsIterator struct { chunkenc.Iterator - currentFH *histogram.FloatHistogram - lastFH *histogram.FloatHistogram + current *histogram.FloatHistogram + last *histogram.FloatHistogram + lastIsCurrent bool } // NewHistogramStatsIterator creates a new HistogramStatsIterator. 
func NewHistogramStatsIterator(it chunkenc.Iterator) *HistogramStatsIterator { return &HistogramStatsIterator{ - Iterator: it, - currentFH: &histogram.FloatHistogram{}, + Iterator: it, + current: &histogram.FloatHistogram{}, } } @@ -45,12 +46,14 @@ func NewHistogramStatsIterator(it chunkenc.Iterator) *HistogramStatsIterator { // objects already allocated where possible. func (hsi *HistogramStatsIterator) Reset(it chunkenc.Iterator) { hsi.Iterator = it - hsi.lastFH = nil + hsi.last = nil + hsi.lastIsCurrent = false } // Next mostly relays to the underlying iterator, but changes a ValHistogram // return into a ValFloatHistogram return. func (hsi *HistogramStatsIterator) Next() chunkenc.ValueType { + hsi.lastIsCurrent = false vt := hsi.Iterator.Next() if vt == chunkenc.ValHistogram { return chunkenc.ValFloatHistogram @@ -62,9 +65,10 @@ func (hsi *HistogramStatsIterator) Next() chunkenc.ValueType { // return into a ValFloatHistogram return. func (hsi *HistogramStatsIterator) Seek(t int64) chunkenc.ValueType { // If the Seek is going to move the iterator, we have to forget the - // lastFH. + // lastFH and mark the currentFH as not current anymore. if t > hsi.AtT() { - hsi.lastFH = nil + hsi.last = nil + hsi.lastIsCurrent = false } vt := hsi.Iterator.Seek(t) if vt == chunkenc.ValHistogram { @@ -83,47 +87,65 @@ func (*HistogramStatsIterator) AtHistogram(*histogram.Histogram) (int64, *histog // hint (not UnknownCounterReset) if the previous sample has been accessed with // the same iterator. 
func (hsi *HistogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { - var t int64 - t, hsi.currentFH = hsi.Iterator.AtFloatHistogram(hsi.currentFH) - if value.IsStaleNaN(hsi.currentFH.Sum) { - return t, &histogram.FloatHistogram{Sum: hsi.currentFH.Sum} + populateFH := func(src *histogram.FloatHistogram, detectReset bool) { + h := histogram.FloatHistogram{ + CounterResetHint: src.CounterResetHint, + Count: src.Count, + Sum: src.Sum, + } + if detectReset { + h.CounterResetHint = hsi.getResetHint(src.CounterResetHint) + } + if fh == nil { + // Note that we cannot simply write `fh = &h` here + // because that would let h escape to the heap. + fh = &histogram.FloatHistogram{} + *fh = h + } else { + h.CopyTo(fh) + } } - if fh == nil { - fh = &histogram.FloatHistogram{ - CounterResetHint: hsi.getFloatResetHint(hsi.currentFH.CounterResetHint), - Count: hsi.currentFH.Count, - Sum: hsi.currentFH.Sum, - } - hsi.setLastFH(hsi.currentFH) + if hsi.lastIsCurrent { + // Nothing changed since last AtFloatHistogram call. Return a + // copy of the stored last histogram rather than doing counter + // reset detection again (which would yield a potentially wrong + // result of "no counter reset"). + populateFH(hsi.last, false) + return hsi.AtT(), fh + } + + var t int64 + t, hsi.current = hsi.Iterator.AtFloatHistogram(hsi.current) + if value.IsStaleNaN(hsi.current.Sum) { + populateFH(hsi.current, false) return t, fh } - - returnValue := histogram.FloatHistogram{ - CounterResetHint: hsi.getFloatResetHint(hsi.currentFH.CounterResetHint), - Count: hsi.currentFH.Count, - Sum: hsi.currentFH.Sum, - } - returnValue.CopyTo(fh) - - hsi.setLastFH(hsi.currentFH) + populateFH(hsi.current, true) + hsi.setLastFromCurrent(fh.CounterResetHint) return t, fh } -func (hsi *HistogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) { - if hsi.lastFH == nil { - hsi.lastFH = fh.Copy() +// setLastFromCurrent stores a copy of hsi.current as hsi.last. 
The +// CounterResetHint of hsi.last is set to the provided value, though. This is +// meant to store the value we have calculated on the fly so that we can return +// the same without re-calculation in case AtFloatHistogram is called multiple +// times. +func (hsi *HistogramStatsIterator) setLastFromCurrent(hint histogram.CounterResetHint) { + if hsi.last == nil { + hsi.last = hsi.current.Copy() } else { - fh.CopyTo(hsi.lastFH) + hsi.current.CopyTo(hsi.last) } + hsi.last.CounterResetHint = hint + hsi.lastIsCurrent = true } -func (hsi *HistogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint { +func (hsi *HistogramStatsIterator) getResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint { if hint != histogram.UnknownCounterReset { return hint } - prevFH := hsi.lastFH - if prevFH == nil { + if hsi.last == nil { // We don't know if there's a counter reset. Note that this // generally will trigger an explicit counter reset detection by // the PromQL engine, which in turn isn't as reliable in this @@ -134,7 +156,7 @@ func (hsi *HistogramStatsIterator) getFloatResetHint(hint histogram.CounterReset // place. 
return histogram.UnknownCounterReset } - if hsi.currentFH.DetectReset(prevFH) { + if hsi.current.DetectReset(hsi.last) { return histogram.CounterReset } return histogram.NotCounterReset diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go index b9d37ce3b9..80bfee519d 100644 --- a/promql/histogram_stats_iterator_test.go +++ b/promql/histogram_stats_iterator_test.go @@ -116,10 +116,16 @@ func TestHistogramStatsDecoding(t *testing.T) { t.Run(tc.name, func(t *testing.T) { check := func(statsIterator *HistogramStatsIterator) { decodedStats := make([]*histogram.FloatHistogram, 0) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtFloatHistogram(nil) - decodedStats = append(decodedStats, h) + for typ := statsIterator.Next(); typ != chunkenc.ValNone; typ = statsIterator.Next() { + require.Equal(t, chunkenc.ValFloatHistogram, typ) + t1, h1 := statsIterator.AtFloatHistogram(nil) + // Call AtFloatHistogram again to check for idempotency. + t2, h2 := statsIterator.AtFloatHistogram(nil) + require.Equal(t, t1, t2) + require.True(t, h1.Equals(h2)) // require.Equal does not work with sum=NaN. + decodedStats = append(decodedStats, h1) } + require.NoError(t, statsIterator.Err()) for i := 0; i < len(tc.histograms); i++ { require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint) fh := tc.histograms[i].ToFloat(nil) @@ -170,6 +176,9 @@ func TestHistogramStatsMixedUse(t *testing.T) { typ = statsIterator.Next() require.Equal(t, chunkenc.ValFloatHistogram, typ) _, h = statsIterator.AtFloatHistogram(nil) + // We call AtFloatHistogram here again "randomly" to check idempotency. 
+ _, h2 := statsIterator.AtFloatHistogram(nil) + require.True(t, h.Equals(h2)) actualHints[1] = h.CounterResetHint typ = statsIterator.Next() require.Equal(t, chunkenc.ValFloatHistogram, typ) From 121de76cbb224b53582627363b1b4ccbaa5221ba Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 9 Sep 2025 13:40:54 +0200 Subject: [PATCH 25/89] promqltest: Remove now needless `1*` work-around Prior to #17127, we needed to add another level in the AST to trigger the usage of `HistogramStatsIterator`. This is fixed now. Signed-off-by: beorn7 --- promql/promqltest/testdata/native_histograms.test | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index eb8cfb3432..83933e1e7f 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1635,31 +1635,28 @@ load 1m # Trigger an annotation about conflicting counter resets by going through the # HistogramStatsIterator, which creates counter reset hints on the fly. -eval instant at 5m 1*histogram_count(sum_over_time(reset{timing="late"}[5m])) +eval instant at 5m histogram_count(sum_over_time(reset{timing="late"}[5m])) expect warn msg: PromQL warning: conflicting counter resets during histogram addition {timing="late"} 7 -eval instant at 5m 1*histogram_count(sum(reset)) +eval instant at 5m histogram_count(sum(reset)) expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation {} 5 -eval instant at 5m 1*histogram_count(avg(reset)) +eval instant at 5m histogram_count(avg(reset)) expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation {} 2.5 # No annotation with the right timing. 
-eval instant at 30s 1*histogram_count(sum(reset)) +eval instant at 30s histogram_count(sum(reset)) expect no_warn {} 3 -eval instant at 30s 1*histogram_count(avg(reset)) +eval instant at 30s histogram_count(avg(reset)) expect no_warn {} 1.5 # Ensure that the annotation does not happen with rate. -eval instant at 5m 1*histogram_count(rate(reset{timing="late"}[5m])) +eval instant at 5m histogram_count(rate(reset{timing="late"}[5m])) expect no_warn {timing="late"} 0.0175 - -# NOTE: The `1*` part in the expressions above should not be needed. -# It can be removed once https://github.com/prometheus/prometheus/pull/17127 is merged. From dfb24f4ba0b2d9b2a7006797b0aaf435a94372af Mon Sep 17 00:00:00 2001 From: machine424 Date: Tue, 9 Sep 2025 15:24:28 +0200 Subject: [PATCH 26/89] chore: prepare release 3.6.0-rc.1 Signed-off-by: machine424 --- CHANGELOG.md | 4 +++- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- 7 files changed, 17 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cadfe85d92..f57fbdb976 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,9 @@ ## main / unreleased -* [ENHANCEMENT] TSDB: Track stale series in the Head block based on stale sample. #16925 +## 3.6.0-rc.1 / 2025-09-09 + +* [ENHANCEMENT] TSDB: Track stale series in the Head block via the `prometheus_tsdb_head_stale_series` metric. 
#16925 ## 3.6.0-rc.0 / 2025-08-12 diff --git a/VERSION b/VERSION index 0b3d28b729..52b3f679ad 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.6.0-rc.0 +3.6.0-rc.1 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 404f1db99a..bbed97d029 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.306.0-rc.0", + "@prometheus-io/codemirror-promql": "0.306.0-rc.1", "@reduxjs/toolkit": "^2.7.0", "@tabler/icons-react": "^3.31.0", "@tanstack/react-query": "^5.74.7", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 7619365062..08bee2bbf0 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.306.0-rc.0", + "@prometheus-io/lezer-promql": "0.306.0-rc.1", "lru-cache": "^11.1.0" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index a3605833af..9242fcd243 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "description": 
"lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f9f210d945..b2d888cab2 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.18.6", "@codemirror/language": "^6.11.0", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.306.0-rc.0", + "@prometheus-io/codemirror-promql": "0.306.0-rc.1", "@reduxjs/toolkit": "^2.7.0", "@tabler/icons-react": "^3.31.0", "@tanstack/react-query": "^5.74.7", @@ -189,10 +189,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.306.0-rc.0", + "@prometheus-io/lezer-promql": "0.306.0-rc.1", "lru-cache": "^11.1.0" }, "devDependencies": { @@ -222,7 +222,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.3", diff --git a/web/ui/package.json b/web/ui/package.json index a5a1190db6..1161f4931b 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.306.0-rc.0", + "version": "0.306.0-rc.1", "private": true, "scripts": { "build": "bash build_ui.sh --all", 
From c84cf3622f750596077de02be0a9c9283c7e5519 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 4 Sep 2025 00:26:31 +0200 Subject: [PATCH 27/89] promql: Add a two-legged benchmark for HistogramStatsIterator Signed-off-by: beorn7 --- promql/bench_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/promql/bench_test.go b/promql/bench_test.go index d425565788..92831de346 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -350,6 +350,14 @@ func BenchmarkNativeHistograms(b *testing.B) { name: "histogram_count with long rate interval", query: "histogram_count(sum(rate(native_histogram_series[20m])))", }, + { + name: "two-legged histogram_count/sum with short rate interval", + query: "histogram_count(sum(rate(native_histogram_series[2m]))) + histogram_sum(sum(rate(native_histogram_series[2m])))", + }, + { + name: "two-legged histogram_count/sum with long rate interval", + query: "histogram_count(sum(rate(native_histogram_series[20m]))) + histogram_sum(sum(rate(native_histogram_series[20m])))", + }, } opts := promql.EngineOpts{ From 0fa70e0f6c129e121669ee9823f22d4e4993966d Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 3 Sep 2025 19:21:21 +0200 Subject: [PATCH 28/89] promql: Use `HistogramStatsIterator` more often The current code stops the walk after we have found the first relevant function. However, in expressions with multiple legs, we will then use the `HistogramStatsIterator` at most once. This change should make sure we explore all legs. The added tests make sure we are not using `HistogramStatsIterator` where we shouldn't (but the opposite can only be seen in a benchmark or with a more explicit test). 
Signed-off-by: beorn7 --- promql/engine.go | 2 +- .../promqltest/testdata/native_histograms.test | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index 91257eae37..edafe4fd43 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3920,7 +3920,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) { break pathLoop } } - return errors.New("stop") + return nil }) } diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 83933e1e7f..f0c510bd50 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1660,3 +1660,20 @@ eval instant at 30s histogram_count(avg(reset)) eval instant at 5m histogram_count(rate(reset{timing="late"}[5m])) expect no_warn {timing="late"} 0.0175 + +clear + +# Test edge cases of HistogramStatsIterator detection. +# We access the same series multiple times within the same expression, +# once with and once without HistogramStatsIterator. The results here +# at least prove that we do not use HistogramStatsIterator where we +# should not. 
+load 1m + histogram {{schema:0 count:10 sum:50 counter_reset_hint:gauge buckets:[1 2 3 4]}} + +eval instant at 1m histogram_count(histogram unless histogram_quantile(0.5, histogram) < 3) + {} 10 + +eval instant at 1m histogram_quantile(0.5, histogram unless histogram_count(histogram) == 0) + {} 3.1748021039363987 + From 594f9d63a5f9635d48d6e26b68b708e21630f9ee Mon Sep 17 00:00:00 2001 From: NamanParlecha Date: Thu, 11 Sep 2025 15:19:42 +0530 Subject: [PATCH 29/89] refactor(textparse): Introduce Variadic options in textParse.New (#17155) * refactor(textparse): introduce ParserOptions struct for cleaner parser initialization Signed-off-by: Naman-B-Parlecha * refactor(fuzz): update fuzzParseMetricWithContentType to use ParserOptions Signed-off-by: Naman-B-Parlecha * refactor(parser): simplify ParserOptions usage in tests and implementations Signed-off-by: Naman-B-Parlecha * refactor(parse): using variadic options Signed-off-by: Naman-B-Parlecha * refactor(parser): add fallbackType & SymbolTable to variadic options Signed-off-by: Naman-B-Parlecha * refactor(parser): private fields Signed-off-by: Naman-B-Parlecha * refactor(scrape): compose parser options Signed-off-by: Naman-B-Parlecha * refactor(parser): add comments Signed-off-by: Naman-B-Parlecha * refactor(parser): update to use ParserOptions struct for configuration Signed-off-by: Naman-B-Parlecha * refactor(scrape): remove unused parserOptions field from scrapeLoop Signed-off-by: Naman-B-Parlecha * refactor(parser): update ParserOptions field names and add comments for clarity Signed-off-by: Naman-B-Parlecha --------- Signed-off-by: Naman-B-Parlecha --- model/textparse/benchmark_test.go | 2 +- model/textparse/interface.go | 42 +++++++++++++++++++++++++------ model/textparse/interface_test.go | 2 +- model/textparse/nhcbparse_test.go | 16 ++++++------ promql/fuzz.go | 2 +- scrape/scrape.go | 21 ++++++++++------ scrape/scrape_test.go | 2 +- 7 files changed, 60 insertions(+), 27 deletions(-) diff --git 
a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index 59ca349e31..a6fbd4ccd1 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ -157,7 +157,7 @@ func benchParse(b *testing.B, data []byte, parser string) { } case "omtext_with_nhcb": newParserFn = func(buf []byte, st *labels.SymbolTable) Parser { - p, err := New(buf, "application/openmetrics-text", "", false, true, false, false, st) + p, err := New(buf, "application/openmetrics-text", st, ParserOptions{ConvertClassicHistogramsToNHCB: true}) require.NoError(b, err) return p } diff --git a/model/textparse/interface.go b/model/textparse/interface.go index c4b0aad5e8..d4749c3da6 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -122,6 +122,28 @@ func extractMediaType(contentType, fallbackType string) (string, error) { return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) } +type ParserOptions struct { + // EnableTypeAndUnitLabels enables parsing and inclusion of type and unit labels + // in the parsed metrics. + EnableTypeAndUnitLabels bool + + // ConvertClassicHistogramsToNHCB enables conversion of classic histograms + // to native histogram custom buckets (NHCB) format. + ConvertClassicHistogramsToNHCB bool + + // KeepClassicOnClassicAndNativeHistograms causes parser to output classic histogram + // that is also present as a native histogram. (Proto parsing only). + KeepClassicOnClassicAndNativeHistograms bool + + // OpenMetricsSkipCTSeries determines whether to skip `_created` timestamp series + // during (OpenMetrics parsing only). + OpenMetricsSkipCTSeries bool + + // FallbackContentType specifies the fallback content type to use when the provided + // Content-Type header cannot be parsed or is not supported. + FallbackContentType string +} + // New returns a new parser of the byte slice. 
// // This function no longer guarantees to return a valid parser. @@ -130,27 +152,31 @@ func extractMediaType(contentType, fallbackType string) (string, error) { // An error may also be returned if fallbackType had to be used or there was some // other error parsing the supplied Content-Type. // If the returned parser is nil then the scrape must fail. -func New(b []byte, contentType, fallbackType string, parseClassicHistograms, convertClassicHistogramsToNHCB, skipOMCTSeries, enableTypeAndUnitLabels bool, st *labels.SymbolTable) (Parser, error) { - mediaType, err := extractMediaType(contentType, fallbackType) +func New(b []byte, contentType string, st *labels.SymbolTable, opts ParserOptions) (Parser, error) { + if st == nil { + st = labels.NewSymbolTable() + } + + mediaType, err := extractMediaType(contentType, opts.FallbackContentType) // err may be nil or something we want to warn about. var baseParser Parser switch mediaType { case "application/openmetrics-text": baseParser = NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { - o.skipCTSeries = skipOMCTSeries - o.enableTypeAndUnitLabels = enableTypeAndUnitLabels + o.skipCTSeries = opts.OpenMetricsSkipCTSeries + o.enableTypeAndUnitLabels = opts.EnableTypeAndUnitLabels }) case "application/vnd.google.protobuf": - return NewProtobufParser(b, parseClassicHistograms, convertClassicHistogramsToNHCB, enableTypeAndUnitLabels, st), err + return NewProtobufParser(b, opts.KeepClassicOnClassicAndNativeHistograms, opts.ConvertClassicHistogramsToNHCB, opts.EnableTypeAndUnitLabels, st), err case "text/plain": - baseParser = NewPromParser(b, st, enableTypeAndUnitLabels) + baseParser = NewPromParser(b, st, opts.EnableTypeAndUnitLabels) default: return nil, err } - if baseParser != nil && convertClassicHistogramsToNHCB { - baseParser = NewNHCBParser(baseParser, st, parseClassicHistograms) + if baseParser != nil && opts.ConvertClassicHistogramsToNHCB { + baseParser = NewNHCBParser(baseParser, st, 
opts.KeepClassicOnClassicAndNativeHistograms) } return baseParser, err diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index bafd3df793..532c474845 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -168,7 +168,7 @@ func TestNewParser(t *testing.T) { fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType() - p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, false, false, labels.NewSymbolTable()) + p, err := New([]byte{}, tt.contentType, labels.NewSymbolTable(), ParserOptions{FallbackContentType: fallbackProtoMediaType}) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index f3f5b9c444..f5836b5f7f 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -443,7 +443,7 @@ foobar{quantile="0.99"} 150.1` }, } - p, err := New([]byte(input), "application/openmetrics-text", "", false, true, false, false, labels.NewSymbolTable()) + p, err := New([]byte(input), "application/openmetrics-text", labels.NewSymbolTable(), ParserOptions{ConvertClassicHistogramsToNHCB: true}) require.NoError(t, err) require.NotNil(t, p) got := testParse(t, p) @@ -509,7 +509,7 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 }, } - p, err := New([]byte(input), "application/openmetrics-text", "", false, true, false, false, labels.NewSymbolTable()) + p, err := New([]byte(input), "application/openmetrics-text", labels.NewSymbolTable(), ParserOptions{ConvertClassicHistogramsToNHCB: true}) require.NoError(t, err) require.NotNil(t, p) got := testParse(t, p) @@ -596,21 +596,21 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) { func() (string, parserFactory, []int, parserOptions) { factory := func(keepClassic, nhcb bool) (Parser, error) { inputBuf := createTestProtoBufHistogram(t) - return New(inputBuf.Bytes(), 
"application/vnd.google.protobuf", "", keepClassic, nhcb, false, false, labels.NewSymbolTable()) + return New(inputBuf.Bytes(), "application/vnd.google.protobuf", labels.NewSymbolTable(), ParserOptions{KeepClassicOnClassicAndNativeHistograms: keepClassic, ConvertClassicHistogramsToNHCB: nhcb}) } return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true} }, func() (string, parserFactory, []int, parserOptions) { factory := func(keepClassic, nhcb bool) (Parser, error) { input := createTestOpenMetricsHistogram() - return New([]byte(input), "application/openmetrics-text", "", keepClassic, nhcb, false, false, labels.NewSymbolTable()) + return New([]byte(input), "application/openmetrics-text", labels.NewSymbolTable(), ParserOptions{KeepClassicOnClassicAndNativeHistograms: keepClassic, ConvertClassicHistogramsToNHCB: nhcb}) } return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true} }, func() (string, parserFactory, []int, parserOptions) { factory := func(keepClassic, nhcb bool) (Parser, error) { input := createTestPromHistogram() - return New([]byte(input), "text/plain", "", keepClassic, nhcb, false, false, labels.NewSymbolTable()) + return New([]byte(input), "text/plain", labels.NewSymbolTable(), ParserOptions{KeepClassicOnClassicAndNativeHistograms: keepClassic, ConvertClassicHistogramsToNHCB: nhcb}) } return "Prometheus", factory, []int{1}, parserOptions{} }, @@ -956,7 +956,7 @@ something_bucket{a="b",le="+Inf"} 9 }, } - p, err := New([]byte(input), "application/openmetrics-text", "", false, true, false, false, labels.NewSymbolTable()) + p, err := New([]byte(input), "application/openmetrics-text", labels.NewSymbolTable(), ParserOptions{ConvertClassicHistogramsToNHCB: true}) require.NoError(t, err) require.NotNil(t, p) got := testParse(t, p) @@ -1087,7 +1087,7 @@ metric: < }, } - p, err := New(buf.Bytes(), "application/vnd.google.protobuf", "", false, true, false, false, labels.NewSymbolTable()) + p, 
err := New(buf.Bytes(), "application/vnd.google.protobuf", labels.NewSymbolTable(), ParserOptions{ConvertClassicHistogramsToNHCB: true}) require.NoError(t, err) require.NotNil(t, p) got := testParse(t, p) @@ -1149,7 +1149,7 @@ metric: < for _, tc := range testCases { t.Run(tc.typ, func(t *testing.T) { - p, err := New(tc.input, tc.typ, "", false, true, false, false, labels.NewSymbolTable()) + p, err := New(tc.input, tc.typ, labels.NewSymbolTable(), ParserOptions{ConvertClassicHistogramsToNHCB: true}) require.NoError(t, err) require.NotNil(t, p) diff --git a/promql/fuzz.go b/promql/fuzz.go index 73c37c8198..a71a63f8eb 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -61,7 +61,7 @@ const ( var symbolTable = labels.NewSymbolTable() func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType, "", false, false, false, false, symbolTable) + p, warning := textparse.New(in, contentType, symbolTable, textparse.ParserOptions{}) if p == nil || warning != nil { // An invalid content type is being passed, which should not happen // in this context. diff --git a/scrape/scrape.go b/scrape/scrape.go index 56ff31c60c..93047c474c 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -930,16 +930,17 @@ type scrapeLoop struct { labelLimits *labelLimits interval time.Duration timeout time.Duration - alwaysScrapeClassicHist bool - convertClassicHistToNHCB bool validationScheme model.ValidationScheme escapingScheme model.EscapingScheme + + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool + enableCTZeroIngestion bool + enableTypeAndUnitLabels bool fallbackScrapeProtocol string // Feature flagged options. 
enableNativeHistogramIngestion bool - enableCTZeroIngestion bool - enableTypeAndUnitLabels bool appender func(ctx context.Context) storage.Appender symbolTable *labels.SymbolTable @@ -1305,16 +1306,16 @@ func newScrapeLoop(ctx context.Context, timeout: timeout, alwaysScrapeClassicHist: alwaysScrapeClassicHist, convertClassicHistToNHCB: convertClassicHistToNHCB, - enableNativeHistogramIngestion: enableNativeHistogramIngestion, enableCTZeroIngestion: enableCTZeroIngestion, enableTypeAndUnitLabels: enableTypeAndUnitLabels, + fallbackScrapeProtocol: fallbackScrapeProtocol, + enableNativeHistogramIngestion: enableNativeHistogramIngestion, reportExtraMetrics: reportExtraMetrics, appendMetadataToWAL: appendMetadataToWAL, metrics: metrics, skipOffsetting: skipOffsetting, validationScheme: validationScheme, escapingScheme: escapingScheme, - fallbackScrapeProtocol: fallbackScrapeProtocol, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1634,7 +1635,13 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, return } - p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.convertClassicHistToNHCB, sl.enableCTZeroIngestion, sl.enableTypeAndUnitLabels, sl.symbolTable) + p, err := textparse.New(b, contentType, sl.symbolTable, textparse.ParserOptions{ + EnableTypeAndUnitLabels: sl.enableTypeAndUnitLabels, + ConvertClassicHistogramsToNHCB: sl.convertClassicHistToNHCB, + KeepClassicOnClassicAndNativeHistograms: sl.alwaysScrapeClassicHist, + OpenMetricsSkipCTSeries: sl.enableCTZeroIngestion, + FallbackContentType: sl.fallbackScrapeProtocol, + }) if p == nil { sl.l.Error( "Failed to determine correct type of scrape target.", diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index ed9bdec936..953a5c33c4 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2097,7 +2097,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { fakeRef := storage.SeriesRef(1) expValue := 
float64(1) metric := []byte(`metric{n="1"} 1`) - p, warning := textparse.New(metric, "text/plain", "", false, false, false, false, labels.NewSymbolTable()) + p, warning := textparse.New(metric, "text/plain", labels.NewSymbolTable(), textparse.ParserOptions{}) require.NotNil(t, p) require.NoError(t, warning) From 1c974108f36b8627a615073a4c64d566c88b34c8 Mon Sep 17 00:00:00 2001 From: Aditya Tiwari Date: Thu, 11 Sep 2025 19:22:58 +0530 Subject: [PATCH 30/89] docs: fix typos and formatting in querying functions and storage Signed-off-by: Aditya Tiwari --- docs/querying/functions.md | 23 +++++++++++------------ docs/storage.md | 8 ++++---- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 4776490e5a..0cae149dd7 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -4,8 +4,7 @@ nav_title: Functions sort_rank: 3 --- -Some functions have default arguments, e.g. `year(v=vector(time()) -instant-vector)`. This means that there is one argument `v` which is an instant +Some functions have default arguments, e.g. `year(v=vector(time()) instant-vector)`. This means that there is one argument `v` which is an instant vector, which if not provided it will default to the value of the expression `vector(time())`. @@ -106,14 +105,14 @@ vector are ignored silently. ## `day_of_month()` -`day_of_month(v=vector(time()) instant-vector)` interpretes float samples in +`day_of_month(v=vector(time()) instant-vector)` interprets float samples in `v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the day of the month (in UTC) for each of those timestamps. Returned values are from 1 to 31. Histogram samples in the input vector are ignored silently. 
## `day_of_week()` -`day_of_week(v=vector(time()) instant-vector)` interpretes float samples in `v` +`day_of_week(v=vector(time()) instant-vector)` interprets float samples in `v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the day of the week (in UTC) for each of those timestamps. Returned values are from 0 to 6, where 0 means Sunday etc. Histogram samples in the input vector are @@ -121,7 +120,7 @@ ignored silently. ## `day_of_year()` -`day_of_year(v=vector(time()) instant-vector)` interpretes float samples in `v` +`day_of_year(v=vector(time()) instant-vector)` interprets float samples in `v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the day of the year (in UTC) for each of those timestamps. Returned values are from 1 to 365 for non-leap years, and 1 to 366 in leap years. Histogram samples in the @@ -129,7 +128,7 @@ input vector are ignored silently. ## `days_in_month()` -`days_in_month(v=vector(time()) instant-vector)` interpretes float samples in +`days_in_month(v=vector(time()) instant-vector)` interprets float samples in `v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the number of days in the month of each of those timestamps (in UTC). Returned values are from 28 to 31. Histogram samples in the input vector are ignored silently. @@ -266,7 +265,7 @@ histograms, it is easy to accidentally pick lower or upper values that are very far away from any bucket boundary, leading to large margins of error. Rather than using `histogram_fraction()` with classic histograms, it is often a more robust approach to directly act on the bucket series when calculating fractions. See the -[calculation of the Apdex scare](https://prometheus.io/docs/practices/histograms/#apdex-score) +[calculation of the Apdex score](https://prometheus.io/docs/practices/histograms/#apdex-score) as a typical example. 
For example, the following expression calculates the fraction of HTTP requests @@ -448,7 +447,7 @@ variance of observations for each histogram sample in `v`. ## `hour()` -`hour(v=vector(time()) instant-vector)` interpretes float samples in `v` as +`hour(v=vector(time()) instant-vector)` interprets float samples in `v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the hour of the day (in UTC) for each of those timestamps. Returned values are from 0 to 23. Histogram samples in the input vector are ignored silently. @@ -612,7 +611,7 @@ spikes are hard to read. Note that when combining `irate()` with an [aggregation operator](operators.md#aggregation-operators) (e.g. `sum()`) or a function aggregating over time (any function ending in `_over_time`), -always take a `irate()` first, then aggregate. Otherwise `irate()` cannot detect +always take an `irate()` first, then aggregate. Otherwise `irate()` cannot detect counter resets when your target restarts. ## `label_join()` @@ -674,14 +673,14 @@ cases are equivalent to those in `ln`. ## `minute()` -`minute(v=vector(time()) instant-vector)` interpretes float samples in `v` as +`minute(v=vector(time()) instant-vector)` interprets float samples in `v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the minute of the hour (in UTC) for each of those timestamps. Returned values are from 0 to 59. Histogram samples in the input vector are ignored silently. ## `month()` -`month(v=vector(time()) instant-vector)` interpretes float samples in `v` as +`month(v=vector(time()) instant-vector)` interprets float samples in `v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the month of the year (in UTC) for each of those timestamps. Returned values are from 1 to 12, where 1 means January etc. Histogram samples in the input vector are @@ -795,7 +794,7 @@ sorted by the values of the given labels in ascending order. 
In case these label values are equal, elements are sorted by their full label sets. `sort_by_label` acts on float and histogram samples in the same way. -Please note that `sort_by_label` only affect the results of instant queries, as +Please note that `sort_by_label` only affects the results of instant queries, as range query results always have a fixed output ordering. `sort_by_label` uses [natural sort diff --git a/docs/storage.md b/docs/storage.md index f472cce140..7b6e3bffe8 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -97,7 +97,7 @@ Prometheus has several flags that configure local storage. The most important ar (m-mapped Head chunks) directory combined (peaks every 2 hours). - `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra - cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. + CPU load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. Note that once enabled, downgrading Prometheus to a version below 2.11.0 will require deleting the WAL. @@ -117,8 +117,8 @@ If your local storage becomes corrupted to the point where Prometheus will not start it is recommended to backup the storage directory and restore the corrupted block directories from your backups. If you do not have backups the last resort is to remove the corrupted files. For example you can try removing -individual block directories or the write-ahead-log (wal) files. Note that this -means losing the data for the time range those blocks or wal covers. +individual block directories or the write-ahead-log (WAL) files. Note that this +means losing the data for the time range those blocks or WAL covers. CAUTION: Non-POSIX compliant filesystems are not supported for Prometheus' local storage as unrecoverable corruptions may happen. NFS filesystems @@ -213,7 +213,7 @@ procedure, as they cannot be represented in the OpenMetrics format. 
### Usage -Backfilling can be used via the Promtool command line. Promtool will write the blocks +Backfilling can be used via the `promtool` command line. `promtool` will write the blocks to a directory. By default this output directory is ./data/, you can change it by using the name of the desired output directory as an optional argument in the sub-command. From bdf547ae9c25fe95f755b53343bcaa6b3d6bbbd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Sat, 13 Sep 2025 16:25:21 +0200 Subject: [PATCH 31/89] fix(nativehistograms): validation should fail on unsupported schemas MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Histogram.Validate and FloatHistogram.Validate now return error on unsupported schemas. Scrape and remote-write handler reduces the schema to the maximum allowed if it is above the maximum, but below the theoretical maximum of 52. For scrape the maximum is a configuration option, for remote-write it is 8. Note: OTLP endpoint already does the reduction, without checking that it is below 52 as the spec does not specify a maximum. Signed-off-by: György Krajcsovits --- model/histogram/float_histogram.go | 7 +- model/histogram/generic.go | 13 ++- model/histogram/histogram.go | 7 +- model/histogram/histogram_test.go | 12 +++ scrape/target.go | 4 +- .../prometheusremotewrite/histograms.go | 10 +- storage/remote/write_handler.go | 21 ++-- storage/remote/write_handler_test.go | 97 +++++++++++++++++++ 8 files changed, 150 insertions(+), 21 deletions(-) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2b78c6d630..b0e512fbc5 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -798,7 +798,8 @@ func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { // create false positives here. 
func (h *FloatHistogram) Validate() error { var nCount, pCount float64 - if h.UsesCustomBuckets() { + switch { + case IsCustomBucketsSchema(h.Schema): if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { return fmt.Errorf("custom buckets: %w", err) } @@ -814,7 +815,7 @@ func (h *FloatHistogram) Validate() error { if len(h.NegativeBuckets) > 0 { return errors.New("custom buckets: must not have negative buckets") } - } else { + case IsExponentialSchema(h.Schema): if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { return fmt.Errorf("positive side: %w", err) } @@ -828,6 +829,8 @@ func (h *FloatHistogram) Validate() error { if h.CustomValues != nil { return errors.New("histogram with exponential schema must not have custom bounds") } + default: + return fmt.Errorf("schema %d: %w", h.Schema, ErrHistogramsInvalidSchema) } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) if err != nil { diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 90a94a5600..4c0940a1f6 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -21,9 +21,11 @@ import ( ) const ( - ExponentialSchemaMax int32 = 8 - ExponentialSchemaMin int32 = -4 - CustomBucketsSchema int32 = -53 + ExponentialSchemaMax int32 = 8 + ExponentialSchemaMaxReserved int32 = 52 + ExponentialSchemaMin int32 = -4 + ExponentialSchemaMinReserved int32 = -9 + CustomBucketsSchema int32 = -53 ) var ( @@ -37,6 +39,7 @@ var ( ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite") ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas") ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") + ErrHistogramsInvalidSchema = errors.New("histogram has an invalid schema, which must be between -4 and 8 for 
exponential buckets, or -53 for custom buckets") ) func IsCustomBucketsSchema(s int32) bool { @@ -47,6 +50,10 @@ func IsExponentialSchema(s int32) bool { return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax } +func IsExponentialSchemaReserved(s int32) bool { + return s >= ExponentialSchemaMinReserved && s <= ExponentialSchemaMaxReserved +} + // BucketCount is a type constraint for the count in a bucket, which can be // float64 (for type FloatHistogram) or uint64 (for type Histogram). type BucketCount interface { diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index cfb63e6341..169be9a6ac 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -425,7 +425,8 @@ func resize[T any](items []T, n int) []T { // the total h.Count). func (h *Histogram) Validate() error { var nCount, pCount uint64 - if h.UsesCustomBuckets() { + switch { + case IsCustomBucketsSchema(h.Schema): if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { return fmt.Errorf("custom buckets: %w", err) } @@ -441,7 +442,7 @@ func (h *Histogram) Validate() error { if len(h.NegativeBuckets) > 0 { return errors.New("custom buckets: must not have negative buckets") } - } else { + case IsExponentialSchema(h.Schema): if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { return fmt.Errorf("positive side: %w", err) } @@ -455,6 +456,8 @@ func (h *Histogram) Validate() error { if h.CustomValues != nil { return errors.New("histogram with exponential schema must not have custom bounds") } + default: + return fmt.Errorf("schema %d: %w", h.Schema, ErrHistogramsInvalidSchema) } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) if err != nil { diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index edc8663c94..35603bc01c 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -1565,6 +1565,18 @@ func 
TestHistogramValidation(t *testing.T) { CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8}, }, }, + "schema too high": { + h: &Histogram{ + Schema: 10, + }, + errMsg: `schema 10: histogram has an invalid schema, which must be between -4 and 8 for exponential buckets, or -53 for custom buckets`, + }, + "schema too low": { + h: &Histogram{ + Schema: -10, + }, + errMsg: `schema -10: histogram has an invalid schema, which must be between -4 and 8 for exponential buckets, or -53 for custom buckets`, + }, } for testName, tc := range tests { diff --git a/scrape/target.go b/scrape/target.go index 73fed40498..0af2b8ba14 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -414,12 +414,12 @@ type maxSchemaAppender struct { func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema { + if histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > app.maxSchema { h = h.ReduceResolution(app.maxSchema) } } if fh != nil { - if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema { + if histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > app.maxSchema { fh = fh.ReduceResolution(app.maxSchema) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index a694d2067a..0bc8a876e4 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -86,16 +86,16 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() - 
if scale < -4 { + if scale < histogram.ExponentialSchemaMin { return nil, annots, fmt.Errorf("cannot convert exponential to native histogram."+ - " Scale must be >= -4, was %d", scale) + " Scale must be >= %d, was %d", histogram.ExponentialSchemaMin, scale) } var scaleDown int32 - if scale > 8 { - scaleDown = scale - 8 - scale = 8 + if scale > histogram.ExponentialSchemaMax { + scaleDown = scale - histogram.ExponentialSchemaMax + scale = histogram.ExponentialSchemaMax } pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 7681655e61..ad0a2a13e0 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -229,7 +229,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err samplesWithInvalidLabels := 0 samplesAppended := 0 - app := &timeLimitAppender{ + app := &remoteWriteAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } @@ -344,7 +344,7 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist // NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors. // Once we have 5xx type of error, we immediately stop and rollback all appends. 
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) { - app := &timeLimitAppender{ + app := &remoteWriteAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } @@ -616,7 +616,7 @@ type rwExporter struct { func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { otlpCfg := rw.config().OTLPConfig - app := &timeLimitAppender{ + app := &remoteWriteAppender{ Appender: rw.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } @@ -719,13 +719,13 @@ func hasDelta(md pmetric.Metrics) bool { return false } -type timeLimitAppender struct { +type remoteWriteAppender struct { storage.Appender maxTime int64 } -func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { +func (app *remoteWriteAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if t > app.maxTime { return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) } @@ -737,11 +737,18 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, return ref, nil } -func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (app *remoteWriteAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if t > app.maxTime { return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) } + if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax { + h = h.ReduceResolution(histogram.ExponentialSchemaMax) + } + if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && 
fh.Schema > histogram.ExponentialSchemaMax { + fh = fh.ReduceResolution(histogram.ExponentialSchemaMax) + } + ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh) if err != nil { return 0, err @@ -749,7 +756,7 @@ func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.La return ref, nil } -func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { +func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { if e.Ts > app.maxTime { return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) } diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 5631e80732..78cbcdccf7 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -1134,3 +1134,100 @@ func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels m.samples = append(m.samples, mockSample{l, ct, 0}) return storage.SeriesRef(hash), nil } + +var ( + highSchemaHistogram = &histogram.Histogram{ + Schema: 10, + PositiveSpans: []histogram.Span{ + { + Offset: -1, + Length: 2, + }, + }, + PositiveBuckets: []int64{1, 2}, + NegativeSpans: []histogram.Span{ + { + Offset: 0, + Length: 1, + }, + }, + NegativeBuckets: []int64{1}, + } + reducedSchemaHistogram = &histogram.Histogram{ + Schema: 8, + PositiveSpans: []histogram.Span{ + { + Offset: 0, + Length: 1, + }, + }, + PositiveBuckets: []int64{4}, + NegativeSpans: []histogram.Span{ + { + Offset: 0, + Length: 1, + }, + }, + NegativeBuckets: []int64{1}, + } +) + +func TestHistogramsReduction(t *testing.T) { + for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} { + t.Run(string(protoMsg), func(t *testing.T) { + appendable := &mockAppendable{} + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, 
[]config.RemoteWriteProtoMsg{protoMsg}, false) + + var ( + err error + payload []byte + ) + + if protoMsg == config.RemoteWriteProtoMsgV1 { + payload, _, _, err = buildWriteRequest(nil, []prompb.TimeSeries{ + { + Labels: []prompb.Label{{Name: "__name__", Value: "test_metric1"}}, + Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, highSchemaHistogram)}, + }, + { + Labels: []prompb.Label{{Name: "__name__", Value: "test_metric2"}}, + Histograms: []prompb.Histogram{prompb.FromFloatHistogram(2, highSchemaHistogram.ToFloat(nil))}, + }, + }, nil, nil, nil, nil, "snappy") + } else { + payload, _, _, err = buildV2WriteRequest(promslog.NewNopLogger(), []writev2.TimeSeries{ + { + LabelsRefs: []uint32{0, 1}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, highSchemaHistogram)}, + }, + { + LabelsRefs: []uint32{0, 2}, + Histograms: []writev2.Histogram{writev2.FromFloatHistogram(2, highSchemaHistogram.ToFloat(nil))}, + }, + }, []string{"__name__", "test_metric1", "test_metric2"}, + nil, nil, nil, "snappy") + } + require.NoError(t, err) + + req, err := http.NewRequest("", "", bytes.NewReader(payload)) + require.NoError(t, err) + + req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[protoMsg]) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + resp := recorder.Result() + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, http.StatusNoContent, resp.StatusCode) + require.Empty(t, body) + + require.Len(t, appendable.histograms, 2) + require.Equal(t, int64(1), appendable.histograms[0].t) + require.Equal(t, reducedSchemaHistogram, appendable.histograms[0].h) + require.Equal(t, int64(2), appendable.histograms[1].t) + require.Equal(t, reducedSchemaHistogram.ToFloat(nil), appendable.histograms[1].fh) + }) + } +} From a14faab4351025b486fb5dd80362c3653cc319ff Mon Sep 17 00:00:00 2001 From: dancer1325 Date: Mon, 15 Sep 2025 17:25:12 +0200 Subject: [PATCH 32/89] docs(): fix gettingStarted outdated graph 
reference /graph does NOT exist anymore in the new React app. It has been refactored within /query Signed-off-by: dancer1325 --- docs/getting_started.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index aeba295da8..35f1a88a7d 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -79,7 +79,7 @@ navigating to its metrics endpoint: Let us explore data that Prometheus has collected about itself. To use Prometheus's built-in expression browser, navigate to -http://localhost:9090/graph and choose the "Table" view within the "Graph" tab. +http://localhost:9090/query and choose the "Graph" tab. As you can gather from [localhost:9090/metrics](http://localhost:9090/metrics), one metric that Prometheus exports about itself is named @@ -113,7 +113,7 @@ For more about the expression language, see the ## Using the graphing interface -To graph expressions, navigate to http://localhost:9090/graph and use the "Graph" +To graph expressions, navigate to http://localhost:9090/query and use the "Graph" tab. 
For example, enter the following expression to graph the per-second rate of chunks From aa922ce3b66af7065a4cef9bece23e6bce7e1357 Mon Sep 17 00:00:00 2001 From: Andrew Hall Date: Tue, 16 Sep 2025 19:28:19 +0800 Subject: [PATCH 33/89] Added support for string literals and range results for instant queries in test scripting framework (#17055) Signed-off-by: Andrew Hall Co-authored-by: Charles Korn Co-authored-by: Arve Knudsen --- promql/engine_test.go | 83 --------- promql/promqltest/README.md | 36 ++++ promql/promqltest/test.go | 157 +++++++++++++++--- promql/promqltest/test_test.go | 138 +++++++++++++++ promql/promqltest/testdata/literals.test | 15 ++ promql/promqltest/testdata/range_queries.test | 34 ++++ 6 files changed, 355 insertions(+), 108 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index f70036e3c0..e2aadd7a5d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3195,89 +3195,6 @@ func TestEngine_Close(t *testing.T) { }) } -func TestInstantQueryWithRangeVectorSelector(t *testing.T) { - engine := newTestEngine(t) - - baseT := timestamp.Time(0) - storage := promqltest.LoadedStorage(t, ` - load 1m - some_metric{env="1"} 0+1x4 - some_metric{env="2"} 0+2x4 - some_metric{env="3"} {{count:0}}+{{count:1}}x4 - some_metric_with_stale_marker 0 1 stale 3 - `) - t.Cleanup(func() { require.NoError(t, storage.Close()) }) - - testCases := map[string]struct { - expr string - expected promql.Matrix - ts time.Time - }{ - "matches series with points in range": { - expr: "some_metric[2m]", - ts: baseT.Add(2 * time.Minute), - expected: promql.Matrix{ - { - Metric: labels.FromStrings("__name__", "some_metric", "env", "1"), - Floats: []promql.FPoint{ - {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, - {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 2}, - }, - }, - { - Metric: labels.FromStrings("__name__", "some_metric", "env", "2"), - Floats: []promql.FPoint{ - {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 2}, - {T: 
timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4}, - }, - }, - { - Metric: labels.FromStrings("__name__", "some_metric", "env", "3"), - Histograms: []promql.HPoint{ - {T: timestamp.FromTime(baseT.Add(time.Minute)), H: &histogram.FloatHistogram{Count: 1, CounterResetHint: histogram.NotCounterReset}}, - {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), H: &histogram.FloatHistogram{Count: 2, CounterResetHint: histogram.NotCounterReset}}, - }, - }, - }, - }, - "matches no series": { - expr: "some_nonexistent_metric[1m]", - ts: baseT, - expected: promql.Matrix{}, - }, - "no samples in range": { - expr: "some_metric[1m]", - ts: baseT.Add(20 * time.Minute), - expected: promql.Matrix{}, - }, - "metric with stale marker": { - expr: "some_metric_with_stale_marker[3m]", - ts: baseT.Add(3 * time.Minute), - expected: promql.Matrix{ - { - Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"), - Floats: []promql.FPoint{ - {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, - {T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3}, - }, - }, - }, - }, - } - - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - q, err := engine.NewInstantQuery(context.Background(), storage, nil, testCase.expr, testCase.ts) - require.NoError(t, err) - defer q.Close() - - res := q.Exec(context.Background()) - require.NoError(t, res.Err) - testutil.RequireEqual(t, testCase.expected, res.Value) - }) - } -} - func TestQueryLookbackDelta(t *testing.T) { var ( load = `load 5m diff --git a/promql/promqltest/README.md b/promql/promqltest/README.md index 84a0e69f3a..d26c01c6f1 100644 --- a/promql/promqltest/README.md +++ b/promql/promqltest/README.md @@ -106,8 +106,44 @@ eval range from to step * `` and `` specify the time range of the range query, and use the same syntax as `