commit ca416c580c
Author: Bryan Boreham <bjboreham@gmail.com>
Date:   2025-05-02 10:31:57 +01:00 (committed by GitHub)

    Merge branch 'main' into slicelabels

    Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
94 changed files with 4609 additions and 2051 deletions

View File

@@ -11,7 +11,7 @@ jobs:
     container:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
-      image: quay.io/prometheus/golang-builder:1.23-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
@@ -27,7 +27,7 @@ jobs:
     name: More Go tests
     runs-on: ubuntu-latest
     container:
-      image: quay.io/prometheus/golang-builder:1.23-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
@@ -59,7 +59,7 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.23-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -94,7 +94,7 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.23-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: go install ./cmd/promtool/.

View File

@@ -27,12 +27,12 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12
+        uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13
         with:
           languages: ${{ matrix.language }}
       - name: Autobuild
-        uses: github/codeql-action/autobuild@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12
+        uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12
+        uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13

View File

@@ -45,6 +45,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@5f8171a638ada777af81d42b55959a643bb29017 # tag=v3.28.12
+        uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # tag=v3.28.13
         with:
           sarif_file: results.sarif

View File

@@ -4,9 +4,44 @@
 * [CHANGE] Make setting out-of-order native histograms feature (`--enable-feature=ooo-native-histograms`) a no-op. Out-of-order native histograms are now always enabled when `out_of_order_time_window` is greater than zero and `--enable-feature=native-histograms` is set. #16207
 * [FEATURE] OTLP translate: Add feature flag for optionally translating OTel explicit bucket histograms into native histograms with custom buckets. #15850
+* [FEATURE] OTLP translate: Add option to receive OTLP metrics without translating names or attributes. #16441
 * [ENHANCEMENT] TSDB: add `prometheus_tsdb_wal_replay_unknown_refs_total` and `prometheus_tsdb_wbl_replay_unknown_refs_total` metrics to track unknown series references during WAL/WBL replay. #16166
 * [BUGFIX] TSDB: fix unknown series errors and possible lost data during WAL replay when series are removed from the head due to inactivity and reappear before the next WAL checkpoint. #16060
+
+## 3.3.0 / 2025-04-15
+
+* [FEATURE] PromQL: Implement `idelta()` and `irate()` for native histograms. #15853
+* [ENHANCEMENT] Scaleway SD: Add `__meta_scaleway_instance_public_ipv4_addresses` and `__meta_scaleway_instance_public_ipv6_addresses` labels. #14228
+* [ENHANCEMENT] TSDB: Reduce locking while reloading blocks. #12920
+* [ENHANCEMENT] PromQL: Allow UTF-8 labels in `label_replace()`. #15974
+* [ENHANCEMENT] Promtool: `tsdb create-blocks-from openmetrics` can now read from a Pipe. #16011
+* [ENHANCEMENT] Rules: Add support for anchors and aliases in rule files. #14957
+* [ENHANCEMENT] Dockerfile: Make `/prometheus` writable. #16073
+* [ENHANCEMENT] API: Include scrape pool name for dropped targets in `/api/v1/targets`. #16085
+* [ENHANCEMENT] UI: Improve time formatting and copying of selectors. #15999 #16165
+* [ENHANCEMENT] UI: Bring back vertical grid lines and graph legend series toggling instructions. #16163 #16164
+* [ENHANCEMENT] Mixin: The `cluster` label can be customized using `clusterLabel`. #15826
+* [PERF] TSDB: Optimize some operations on head chunks by taking shortcuts. #12659
+* [PERF] TSDB & Agent: Reduce memory footprint during WL replay. #15778
+* [PERF] Remote-Write: Reduce memory footprint during WAL replay. #16197
+* [PERF] API: Reduce memory footprint during header parsing. #16001
+* [PERF] Rules: Improve dependency evaluation, enabling better concurrency. #16039
+* [PERF] Scraping: Improve scraping performance for native histograms. #15731
+* [PERF] Scraping: Improve parsing of created timestamps. #16072
+* [BUGFIX] Scraping: Bump cache iteration after error to avoid false duplicate detections. #16174
+* [BUGFIX] Scraping: Skip native histograms series when ingestion is disabled. #16218
+* [BUGFIX] PromQL: Fix counter reset detection for native histograms. #15902 #15987
+* [BUGFIX] PromQL: Fix inconsistent behavior with an empty range. #15970
+* [BUGFIX] PromQL: Fix inconsistent annotation in `quantile_over_time()`. #16018
+* [BUGFIX] PromQL: Prevent `label_join()` from producing duplicates. #15975
+* [BUGFIX] PromQL: Ignore native histograms in `scalar()`, `sort()` and `sort_desc()`. #15964
+* [BUGFIX] PromQL: Fix annotations for binary operations between incompatible native histograms. #15895
+* [BUGFIX] Alerting: Consider alert relabeling when deciding whether alerts are dropped. #15979
+* [BUGFIX] Config: Set `GoGC` to the default value in case of an empty configuration. #16052
+* [BUGFIX] TSDB: Fix unknown series errors and potential data loss during WAL replay when inactive series are removed from the head and reappear before the next WAL checkpoint. #16060
+* [BUGFIX] Scaleway SD: The public IP will no longer be set to `__meta_scaleway_instance_public_ipv4` if it is an IPv6 address. #14228
+* [BUGFIX] UI: Display the correct value of Alerting rules' `keep_firing_for`. #16211

 ## 3.2.1 / 2025-02-25

 * [BUGFIX] Don't send `Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061

View File

@@ -9,6 +9,7 @@ Maintainers for specific parts of the codebase:
 * `cmd`
   * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl)
 * `discovery`
+  * `azure`: Jan-Otto Kröpke (<mail@jkroepke.de> / @jkroepke)
   * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
   * `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)

View File

@@ -8,12 +8,14 @@ Release cadence of first pre-releases being cut is 6 weeks.
 Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/release-2.55/RELEASE.md) for the v2 release series schedule.

 | release series | date of first pre-release (year-month-day) | release shepherd                  |
-|----------------|--------------------------------------------|-----------------------------------|
+|----------------|--------------------------------------------|------------------------------------|
 | v3.0           | 2024-11-14                                 | Jan Fajerski (GitHub: @jan--f)    |
 | v3.1           | 2024-12-17                                 | Bryan Boreham (GitHub: @bboreham) |
 | v3.2           | 2025-01-28                                 | Jan Fajerski (GitHub: @jan--f)    |
 | v3.3           | 2025-03-11                                 | Ayoub Mrini (Github: @machine424) |
-| v3.4           | 2025-04-22                                 | **volunteer welcome**             |
+| v3.4           | 2025-04-22                                 | Jan-Otto Kröpke (Github: @jkroepke)|
+| v3.5 LTS       | 2025-06-03                                 | Bryan Boreham (GitHub: @bboreham)  |
+| v3.6           | 2025-07-15                                 | **volunteer welcome**              |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

View File

@@ -1 +1 @@
-3.2.1
+3.3.0

View File

@@ -249,6 +249,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
             case "promql-experimental-functions":
                 parser.EnableExperimentalFunctions = true
                 logger.Info("Experimental PromQL functions enabled.")
+            case "promql-duration-expr":
+                parser.ExperimentalDurationExpr = true
+                logger.Info("Experimental duration expression parsing enabled.")
             case "native-histograms":
                 c.tsdb.EnableNativeHistograms = true
                 c.scrape.EnableNativeHistogramsIngestion = true
@@ -279,12 +282,23 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
             case "otlp-deltatocumulative":
                 c.web.ConvertOTLPDelta = true
                 logger.Info("Converting delta OTLP metrics to cumulative")
+            case "otlp-native-delta-ingestion":
+                // Experimental OTLP native delta ingestion.
+                // This currently just stores the raw delta value as-is with unknown metric type. Better typing and
+                // type-aware functions may come later.
+                // See proposal: https://github.com/prometheus/proposals/pull/48
+                c.web.NativeOTLPDeltaIngestion = true
+                logger.Info("Enabling native ingestion of delta OTLP metrics, storing the raw sample values without conversion. WARNING: Delta support is in an early stage of development. The ingestion and querying process is likely to change over time.")
             default:
                 logger.Warn("Unknown option for --enable-feature", "option", o)
             }
         }
     }
+
+    if c.web.ConvertOTLPDelta && c.web.NativeOTLPDeltaIngestion {
+        return errors.New("cannot enable otlp-deltatocumulative and otlp-native-delta-ingestion features at the same time")
+    }
+
     return nil
 }
@@ -539,7 +553,7 @@ func main() {
     a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
         Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
         Default("").StringsVar(&cfg.featureList)

     a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
@@ -637,6 +651,32 @@ func main() {
         cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
     }

+    // Set Go runtime parameters before we get too far into initialization.
+    updateGoGC(cfgFile, logger)
+    if cfg.maxprocsEnable {
+        l := func(format string, a ...interface{}) {
+            logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
+        }
+        if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
+            logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err)
+        }
+    }
+
+    if cfg.memlimitEnable {
+        if _, err := memlimit.SetGoMemLimitWithOpts(
+            memlimit.WithRatio(cfg.memlimitRatio),
+            memlimit.WithProvider(
+                memlimit.ApplyFallback(
+                    memlimit.FromCgroup,
+                    memlimit.FromSystem,
+                ),
+            ),
+            memlimit.WithLogger(logger.With("component", "automemlimit")),
+        ); err != nil {
+            logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
+        }
+    }
+
     // Now that the validity of the config is established, set the config
     // success metrics accordingly, although the config isn't really loaded
     // yet. This will happen later (including setting these metrics again),
@@ -787,29 +827,6 @@ func main() {
         ruleManager *rules.Manager
     )

-    if cfg.maxprocsEnable {
-        l := func(format string, a ...interface{}) {
-            logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
-        }
-        if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
-            logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err)
-        }
-    }
-
-    if cfg.memlimitEnable {
-        if _, err := memlimit.SetGoMemLimitWithOpts(
-            memlimit.WithRatio(cfg.memlimitRatio),
-            memlimit.WithProvider(
-                memlimit.ApplyFallback(
-                    memlimit.FromCgroup,
-                    memlimit.FromSystem,
-                ),
-            ),
-        ); err != nil {
-            logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
-        }
-    }
-
     if !agentMode {
         opts := promql.EngineOpts{
             Logger: logger.With("component", "query engine"),
@@ -1495,6 +1512,14 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg
         return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
     }

+    updateGoGC(conf, logger)
+
+    noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
+    timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
+    return nil
+}
+
+func updateGoGC(conf *config.Config, logger *slog.Logger) {
     oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
     if oldGoGC != conf.Runtime.GoGC {
         logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
@@ -1505,10 +1530,6 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg
     } else {
         os.Setenv("GOGC", "off")
     }
-
-    noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
-    timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
-    return nil
 }

 func startsOrEndsWithQuote(s string) bool {
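The hunks above move the Go runtime setup (GOGC, GOMAXPROCS, GOMEMLIMIT) earlier in startup and extract `updateGoGC` so the same logic also runs on every configuration reload. A minimal standalone sketch of the GOGC pattern, not Prometheus code (the hard-coded `75` stands in for `conf.Runtime.GoGC`):

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// updateGoGC mirrors the pattern in the diff above: debug.SetGCPercent
// returns the previous setting, so a change can be detected and logged
// without tracking any extra state.
func updateGoGC(target int) {
	if old := debug.SetGCPercent(target); old != target {
		fmt.Printf("updated GOGC old=%d new=%d\n", old, target)
	}
}

func main() {
	updateGoGC(75) // logs once: the previous value is the default 100 (or $GOGC)
	updateGoGC(75) // silent: the value is unchanged
}
```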

View File

@@ -61,6 +61,8 @@ import (
     "github.com/prometheus/prometheus/util/documentcli"
 )

+var promqlEnableDelayedNameRemoval = false
+
 func init() {
     // This can be removed when the legacy global mode is fully deprecated.
     //nolint:staticcheck
@@ -255,15 +257,15 @@ func main() {
     tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
     dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
     dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
-    dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
-    dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
+    dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
+    dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
     dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

     tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
     dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
     dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
-    dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
-    dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
+    dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
+    dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
     dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

     importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.")
@@ -304,7 +306,7 @@ func main() {
     promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
     promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()

-    featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings()
+    featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details").Default("").Strings()

     documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
@@ -338,10 +340,14 @@ func main() {
         opts := strings.Split(f, ",")
         for _, o := range opts {
             switch o {
+            case "promql-experimental-functions":
+                parser.EnableExperimentalFunctions = true
+            case "promql-delayed-name-removal":
+                promqlEnableDelayedNameRemoval = true
             case "":
                 continue
             default:
-                fmt.Printf("  WARNING: --enable-feature is currently a no-op")
+                fmt.Printf("  WARNING: Unknown feature passed to --enable-feature: %s", o)
             }
         }
     }
@@ -401,6 +407,7 @@ func main() {
             promqltest.LazyLoaderOpts{
                 EnableAtModifier:         true,
                 EnableNegativeOffset:     true,
+                EnableDelayedNameRemoval: promqlEnableDelayedNameRemoval,
             },
             *testRulesRun,
             *testRulesDiff,

View File

@@ -558,6 +558,16 @@ func TestCheckRules(t *testing.T) {
     })
 }

+func TestCheckRulesWithFeatureFlag(t *testing.T) {
+    // As opposed to TestCheckRules calling CheckRules directly we run promtool
+    // so the feature flag parsing can be tested.
+    args := []string{"-test.main", "--enable-feature=promql-experimental-functions", "check", "rules", "testdata/features.yml"}
+    tool := exec.Command(promtoolPath, args...)
+    err := tool.Run()
+    require.NoError(t, err)
+}
+
 func TestCheckRulesWithRuleFiles(t *testing.T) {
     t.Run("rules-good", func(t *testing.T) {
         t.Parallel()
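Outside the Go test harness, the new test corresponds roughly to running promtool by hand (the rule file path is relative to `cmd/promtool`):

```
promtool --enable-feature=promql-experimental-functions check rules testdata/features.yml
```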

cmd/promtool/testdata/features.yml (new vendored file, 6 lines)
View File

@@ -0,0 +1,6 @@
+groups:
+  - name: features
+    rules:
+      - record: x
+        # We don't expect anything from this, just want to check the function parses.
+        expr: sort_by_label(up, "instance")

View File

@@ -0,0 +1,43 @@
+# Minimal test case to see that fuzzy compare is working as expected.
+# It should allow slight floating point differences through. Larger
+# floating point differences should still fail.
+
+evaluation_interval: 1m
+fuzzy_compare: true
+
+tests:
+  - name: correct fuzzy match
+    input_series:
+      - series: test_low
+        values: 2.9999999999999996
+      - series: test_high
+        values: 3.0000000000000004
+    promql_expr_test:
+      - expr: test_low
+        eval_time: 0
+        exp_samples:
+          - labels: test_low
+            value: 3
+      - expr: test_high
+        eval_time: 0
+        exp_samples:
+          - labels: test_high
+            value: 3
+
+  - name: wrong fuzzy match
+    input_series:
+      - series: test_low
+        values: 2.9999999999999987
+      - series: test_high
+        values: 3.0000000000000013
+    promql_expr_test:
+      - expr: test_low
+        eval_time: 0
+        exp_samples:
+          - labels: test_low
+            value: 3
+      - expr: test_high
+        eval_time: 0
+        exp_samples:
+          - labels: test_high
+            value: 3

View File

@@ -0,0 +1,24 @@
+# Minimal test case to see that fuzzy compare can be turned off,
+# and slight floating point differences fail matching.
+
+evaluation_interval: 1m
+fuzzy_compare: false
+
+tests:
+  - name: correct fuzzy match
+    input_series:
+      - series: test_low
+        values: 2.9999999999999996
+      - series: test_high
+        values: 3.0000000000000004
+    promql_expr_test:
+      - expr: test_low
+        eval_time: 0
+        exp_samples:
+          - labels: test_low
+            value: 3
+      - expr: test_high
+        eval_time: 0
+        exp_samples:
+          - labels: test_high
+            value: 3

View File

@@ -19,6 +19,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "math"
     "os"
     "path/filepath"
     "sort"
@@ -130,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
         if t.Interval == 0 {
             t.Interval = unitTestInp.EvaluationInterval
         }
-        ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.RuleFiles...)
+        ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.FuzzyCompare, unitTestInp.RuleFiles...)
         if ers != nil {
             for _, e := range ers {
                 tc.Fail(e.Error())
@@ -159,6 +160,7 @@ type unitTestFile struct {
     EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
     GroupEvalOrder     []string       `yaml:"group_eval_order"`
     Tests              []testGroup    `yaml:"tests"`
+    FuzzyCompare       bool           `yaml:"fuzzy_compare,omitempty"`
 }

 // resolveAndGlobFilepaths joins all relative paths in a configuration
@@ -197,7 +199,7 @@ type testGroup struct {
 }

 // test performs the unit tests.
-func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields bool, ruleFiles ...string) (outErr []error) {
+func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields, fuzzyCompare bool, ruleFiles ...string) (outErr []error) {
     if debug {
         testStart := time.Now()
         fmt.Printf("DEBUG: Starting test %s\n", testname)
@@ -237,6 +239,14 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
     mint := time.Unix(0, 0).UTC()
     maxt := mint.Add(tg.maxEvalTime())

+    // Optional floating point compare fuzzing.
+    var compareFloat64 cmp.Option = cmp.Options{}
+    if fuzzyCompare {
+        compareFloat64 = cmp.Comparer(func(x, y float64) bool {
+            return x == y || math.Nextafter(x, math.Inf(-1)) == y || math.Nextafter(x, math.Inf(1)) == y
+        })
+    }
+
     // Pre-processing some data for testing alerts.
     // All this preparation is so that we can test alerts as we evaluate the rules.
     // This avoids storing them in memory, as the number of evals might be high.
@@ -374,7 +384,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
             sort.Sort(gotAlerts)
             sort.Sort(expAlerts)

-            if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal)) {
+            if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal), compareFloat64) {
                 var testName string
                 if tg.TestGroupName != "" {
                     testName = fmt.Sprintf("    name: %s,\n", tg.TestGroupName)
@@ -482,7 +492,7 @@ Outer:
         sort.Slice(gotSamples, func(i, j int) bool {
             return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
         })

-        if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal)) {
+        if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal), compareFloat64) {
             errs = append(errs, fmt.Errorf("    expr: %q, time: %s,\n        exp: %v\n        got: %v", testCase.Expr,
                 testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
         }

View File

@@ -240,6 +240,29 @@ func TestRulesUnitTestRun(t *testing.T) {
             ignoreUnknownFields: true,
             want:                0,
         },
+        {
+            name: "Test precise floating point comparison expected failure",
+            args: args{
+                files: []string{"./testdata/rules_run_no_fuzzy.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Test fuzzy floating point comparison correct match",
+            args: args{
+                run:   []string{"correct"},
+                files: []string{"./testdata/rules_run_fuzzy.yml"},
+            },
+            want: 0,
+        },
+        {
+            name: "Test fuzzy floating point comparison wrong match",
+            args: args{
+                run:   []string{"wrong"},
+                files: []string{"./testdata/rules_run_fuzzy.yml"},
+            },
+            want: 1,
+        },
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {

View File

@@ -110,9 +110,9 @@ func Load(s string, logger *slog.Logger) (*Config, error) {
     switch cfg.OTLPConfig.TranslationStrategy {
     case UnderscoreEscapingWithSuffixes:
     case "":
-    case NoUTF8EscapingWithSuffixes:
+    case NoTranslation, NoUTF8EscapingWithSuffixes:
         if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
-            return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled")
+            return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy)
         }
     default:
         return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
@@ -1509,6 +1509,21 @@ var (
     // and label name characters that are not alphanumerics/underscores to underscores.
     // Unit and type suffixes may be appended to metric names, according to certain rules.
     UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
+    // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
+    // and label names. This offers a way for the OTLP users to use native metric names, reducing confusion.
+    //
+    // WARNING: This setting has significant known risks and limitations (see
+    // https://prometheus.io/docs/practices/naming/ for details):
+    //   * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration).
+    //   * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed
+    //     time series. For instance, you may end up in a situation of ingesting `foo.bar` series with unit
+    //     `seconds` and a separate series `foo.bar` with unit `milliseconds`.
+    //
+    // As a result, this setting is experimental and should currently not be used in
+    // production systems.
+    //
+    // TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks.
+    NoTranslation translationStrategyOption = "NoTranslation"
 )
// OTLPConfig is the configuration for writing to the OTLP endpoint. // OTLPConfig is the configuration for writing to the OTLP endpoint.

View File

@@ -1677,7 +1677,7 @@ func TestOTLPConvertHistogramsToNHCB(t *testing.T) {
 }

 func TestOTLPAllowUTF8(t *testing.T) {
-    t.Run("good config", func(t *testing.T) {
+    t.Run("good config - NoUTF8EscapingWithSuffixes", func(t *testing.T) {
         fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")
         verify := func(t *testing.T, conf *Config, err error) {
             t.Helper()
@@ -1697,11 +1697,51 @@
         })
     })

-    t.Run("incompatible config", func(t *testing.T) {
+    t.Run("incompatible config - NoUTF8EscapingWithSuffixes", func(t *testing.T) {
         fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml")
         verify := func(t *testing.T, err error) {
             t.Helper()
-            require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`)
+            require.ErrorContains(t, err, `OTLP translation strategy "NoUTF8EscapingWithSuffixes" is not allowed when UTF8 is disabled`)
+        }
+
+        t.Run("LoadFile", func(t *testing.T) {
+            _, err := LoadFile(fpath, false, promslog.NewNopLogger())
+            verify(t, err)
+        })
+        t.Run("Load", func(t *testing.T) {
+            content, err := os.ReadFile(fpath)
+            require.NoError(t, err)
+            _, err = Load(string(content), promslog.NewNopLogger())
+            t.Log("err", err)
+            verify(t, err)
+        })
+    })
+
+    t.Run("good config - NoTranslation", func(t *testing.T) {
+        fpath := filepath.Join("testdata", "otlp_no_translation.good.yml")
+        verify := func(t *testing.T, conf *Config, err error) {
+            t.Helper()
+            require.NoError(t, err)
+            require.Equal(t, NoTranslation, conf.OTLPConfig.TranslationStrategy)
+        }
+
+        t.Run("LoadFile", func(t *testing.T) {
+            conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
+            verify(t, conf, err)
+        })
+        t.Run("Load", func(t *testing.T) {
+            content, err := os.ReadFile(fpath)
+            require.NoError(t, err)
+            conf, err := Load(string(content), promslog.NewNopLogger())
+            verify(t, conf, err)
+        })
+    })
+
+    t.Run("incompatible config - NoTranslation", func(t *testing.T) {
+        fpath := filepath.Join("testdata", "otlp_no_translation.incompatible.yml")
+        verify := func(t *testing.T, err error) {
+            t.Helper()
+            require.ErrorContains(t, err, `OTLP translation strategy "NoTranslation" is not allowed when UTF8 is disabled`)
         }

         t.Run("LoadFile", func(t *testing.T) {

View File

@@ -0,0 +1,2 @@
+otlp:
+  translation_strategy: NoTranslation

View File

@@ -0,0 +1,4 @@
+global:
+  metric_name_validation_scheme: legacy
+otlp:
+  translation_strategy: NoTranslation

View File

@@ -55,12 +55,14 @@ type hcloudDiscovery struct {
     *refresh.Discovery
     client        *hcloud.Client
     port          int
+    labelSelector string
 }

 // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
 func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) {
     d := &hcloudDiscovery{
         port:          conf.Port,
+        labelSelector: conf.LabelSelector,
     }

     rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
@@ -79,7 +81,10 @@ func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error
 }

 func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
-    servers, err := d.client.Server.All(ctx)
+    servers, err := d.client.Server.AllWithOpts(ctx, hcloud.ServerListOpts{ListOpts: hcloud.ListOpts{
+        PerPage:       50,
+        LabelSelector: d.labelSelector,
+    }})
     if err != nil {
         return nil, err
     }

View File

@@ -59,6 +59,9 @@ type SDConfig struct {
     RefreshInterval model.Duration `yaml:"refresh_interval"`
     Port            int            `yaml:"port"`
     Role            Role           `yaml:"role"`
+    LabelSelector   string         `yaml:"label_selector,omitempty"`
+
     hcloudEndpoint string // For tests only.
     robotEndpoint  string // For tests only.
 }

View File

@@ -110,7 +110,8 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
                 e.logger.Error("converting to EndpointSlice object failed", "err", err)
                 continue
             }
-            if lv, exists := es.Labels[v1.LabelServiceName]; exists && lv == svc.Name {
+            // Only consider the underlying EndpointSlices in the same namespace.
+            if svcName, exists := es.Labels[v1.LabelServiceName]; exists && svcName == svc.Name && es.Namespace == svc.Namespace {
                 e.enqueue(es)
             }
         }

View File

@@ -61,7 +61,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
 | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
 | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. |  |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. |  |
 | <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. |  |
 | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
 | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View File

@@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system.
 | <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). |
 | <code class="text-nowrap">--version</code> | Show application version. |
 | <code class="text-nowrap">--experimental</code> | Enable experimental commands. |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Currently unused. |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details |
@@ -581,8 +581,8 @@ Dump samples from a TSDB.
 | Flag | Description | Default |
 | --- | --- | --- |
 | <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. |  |
-| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
-| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
+| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` |
+| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` |
 | <code class="text-nowrap">--match</code> <code class="text-nowrap">...</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@@ -608,8 +608,8 @@ Dump samples from a TSDB.
 | Flag | Description | Default |
 | --- | --- | --- |
 | <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. |  |
-| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
-| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
+| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` |
+| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` |
 | <code class="text-nowrap">--match</code> <code class="text-nowrap">...</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |

View File

@@ -186,6 +186,16 @@ otlp:
   # - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus.
   #   It preserves all special characters like dots, but still adds required metric name suffixes
   #   for units and _total, as UnderscoreEscapingWithSuffixes does.
+  # - (EXPERIMENTAL) "NoTranslation" is a mode that relies on UTF-8 support in Prometheus.
+  #   It preserves all special characters like dots and won't append special suffixes for metric
+  #   unit and type.
+  #
+  #   WARNING: The "NoTranslation" setting has significant known risks and limitations (see https://prometheus.io/docs/practices/naming/
+  #   for details):
+  #   * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration).
+  #   * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed
+  #     time series. For instance, you may end up in a situation of ingesting `foo.bar` series with unit
+  #     `seconds` and a separate series `foo.bar` with unit `milliseconds`.
   [ translation_strategy: <string> | default = "UnderscoreEscapingWithSuffixes" ]

 # Enables adding "service.name", "service.namespace" and "service.instance.id"
 # resource attributes to the "target_info" metric, on top of converting
@@ -649,7 +659,7 @@ A `tls_config` allows configuring TLS connections.

 ### `<oauth2>`

-OAuth 2.0 authentication using the client credentials grant type.
+OAuth 2.0 authentication using the client credentials or password grant type.
 Prometheus fetches an access token from the specified endpoint with
 the given client access and secret keys.
@@ -669,6 +679,11 @@ scopes:
 token_url: <string>

 # Optional parameters to append to the token URL.
+# To set 'password' grant type, add it to params:
+#   endpoint_params:
+#     grant_type: 'password'
+#     username: 'username@example.com'
+#     password: 'strongpassword'
 endpoint_params:
   [ <string>: <string> ... ]
@@ -1623,6 +1638,10 @@ role: <string>
 # The time after which the servers are refreshed.
 [ refresh_interval: <duration> | default = 60s ]

+# Label selector used to filter the servers when fetching them from the API. See https://docs.hetzner.cloud/#label-selector for more details.
+# Only used when role is hcloud.
+[ label_selector: <string> ]
+
 # HTTP client settings, including authentication methods (such as basic auth and
 # authorization), proxy configurations, TLS options, custom HTTP headers, etc.
 [ <http_config> ]
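For illustration, a hypothetical scrape configuration using the new Hetzner option (the job name and selector value are invented; the selector syntax follows https://docs.hetzner.cloud/#label-selector):

```yaml
scrape_configs:
  - job_name: hetzner-hcloud
    hetzner_sd_configs:
      - role: hcloud  # label_selector only applies to this role
        label_selector: "environment=prod"
```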

View File

@@ -17,10 +17,6 @@ Rule files use YAML.
 The rule files can be reloaded at runtime by sending `SIGHUP` to the Prometheus
 process. The changes are only applied if all rule files are well-formatted.

-_Note about native histograms (experimental feature): Native histogram are always
-recorded as gauge histograms (for now). Most cases will create gauge histograms
-naturally, e.g. after `rate()`._
-
 ## Syntax-checking rules

 To quickly check whether a rule file is syntactically correct without starting
View File

@@ -24,6 +24,10 @@ rule_files:

 [ evaluation_interval: <duration> | default = 1m ]

+# Setting fuzzy_compare true will very slightly weaken floating point comparisons.
+# This will (effectively) ignore differences in the last bit of the mantissa.
+[ fuzzy_compare: <boolean> | default = false ]
+
 # The order in which group names are listed below will be the order of evaluation of
 # rule groups (at a given evaluation time). The order is guaranteed only for the groups mentioned below.
 # All the groups need not be mentioned below.
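A minimal sketch of a unit-test file that opts in, mirroring the `rules_run_fuzzy.yml` testdata added earlier in this commit (`demo_metric` is a made-up series):

```yaml
evaluation_interval: 1m
fuzzy_compare: true
tests:
  - name: tolerate one-ULP drift
    input_series:
      - series: demo_metric
        values: 2.9999999999999996  # one ULP below 3
    promql_expr_test:
      - expr: demo_metric
        eval_time: 0
        exp_samples:
          - labels: demo_metric
            value: 3
```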

View File

@@ -168,7 +168,7 @@ recommended to update these files atomically.
 `--enable-feature=otlp-deltatocumulative`

 When enabled, Prometheus will convert OTLP metrics from delta temporality to their
-cumulative equivalent, instead of dropping them.
+cumulative equivalent, instead of dropping them. This cannot be enabled in conjunction with `otlp-native-delta-ingestion`.

 This uses
 [deltatocumulative][d2c]
@@ -183,4 +183,67 @@ This state is periodically ([`max_stale`][d2c]) cleared of inactive series.
 Enabling this _can_ have negative impact on performance, because the in-memory
 state is mutex guarded. Cumulative-only OTLP requests are not affected.

+### PromQL arithmetic expressions in time durations
+
+`--enable-feature=promql-duration-expr`
+
+With this flag, arithmetic expressions can be used in time durations in range queries and offset durations. For example:
+
+In range queries:
+
+```
+rate(http_requests_total[5m * 2])      # 10 minute range
+rate(http_requests_total[(5+2) * 1m])  # 7 minute range
+```
+
+In offset durations:
+
+```
+http_requests_total offset (1h / 2)        # 30 minute offset
+http_requests_total offset ((2 ^ 3) * 1m)  # 8 minute offset
+```
+
+Note: Duration expressions are not supported in the @ timestamp operator.
+
+The following operators are supported:
+
+* `+` - addition
+* `-` - subtraction
+* `*` - multiplication
+* `/` - division
+* `%` - modulo
+* `^` - exponentiation
+
+Examples of equivalent durations:
+
+* `5m * 2` is equivalent to `10m` or `600s`
+* `10m - 1m` is equivalent to `9m` or `540s`
+* `(5+2) * 1m` is equivalent to `7m` or `420s`
+* `1h / 2` is equivalent to `30m` or `1800s`
+* `4h % 3h` is equivalent to `1h` or `3600s`
+* `(2 ^ 3) * 1m` is equivalent to `8m` or `480s`
+
 [d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor
+## OTLP Native Delta Support
+
+`--enable-feature=otlp-native-delta-ingestion`
+
+When enabled, allows for the native ingestion of delta OTLP metrics, storing the raw sample values without conversion. This cannot be enabled in conjunction with `otlp-deltatocumulative`.
+
+Currently, the StartTimeUnixNano field is ignored, and deltas are given the unknown metric metadata type.
+
+Delta support is in a very early stage of development and the ingestion and querying process may change over time. For the open proposal see [prometheus/proposals#48](https://github.com/prometheus/proposals/pull/48).
+
+### Querying
+
+We encourage users to experiment with deltas and existing PromQL functions; we will collect feedback and likely build features to improve the experience around querying deltas.
+
+Note that standard PromQL counter functions like `rate()` and `increase()` are designed for cumulative metrics and will produce incorrect results when used with delta metrics. This may change in the future, but for now, to get similar results for delta metrics, you need `sum_over_time()`:
+
+* `sum_over_time(delta_metric[<range>])`: Calculates the sum of delta values over the specified time range.
+* `sum_over_time(delta_metric[<range>]) / <range>`: Calculates the per-second rate of the delta metric.
+
+These may not work well if the `<range>` is not a multiple of the collection interval of the metric. For example, if you run a `sum_over_time(delta_metric[1m]) / 1m` range query (with a 1m step), but the collection interval of the metric is 10m, the graph will show a single point every 10 minutes with a high rate value, rather than 10 points with a lower, constant value.
+
+### Current gotchas
+
+* If delta metrics are exposed via [federation](https://prometheus.io/docs/prometheus/latest/federation/), data can be incorrectly collected if the ingestion interval is not the same as the scrape interval for the federated endpoint.
+* It is difficult to figure out whether a metric has delta or cumulative temporality, since there's no indication of temporality in metric names or labels. For now, if you are ingesting a mix of delta and cumulative metrics we advise you to explicitly add your own labels to distinguish them. In the future, we plan to introduce type labels to consistently distinguish metric types and potentially make PromQL functions type-aware (e.g. providing warnings when cumulative-only functions are used with delta metrics).
+* If there are multiple samples being ingested at the same timestamp, only one of the points is kept - the samples are **not** summed together (this is how Prometheus works in general - duplicate timestamp samples are rejected). Any aggregation will have to be done before sending samples to Prometheus.


@ -253,10 +253,23 @@ histogram samples:
## `histogram_fraction()` ## `histogram_fraction()`
`histogram_fraction(lower scalar, upper scalar, v instant-vector)` returns the `histogram_fraction(lower scalar, upper scalar, b instant-vector)` returns the
estimated fraction of observations between the provided lower and upper values estimated fraction of observations between the provided lower and upper values
for each histogram sample in `v`. Float samples are ignored and do not show up for each classic or native histogram contained in `b`. Float samples in `b` are
in the returned vector. considered the counts of observations in each bucket of one or more classic
histograms, while native histogram samples in `b` are treated each individually
as a separate histogram. This works in the same way as for `histogram_quantile()`.
(See there for more details.)
If the provided lower and upper values do not coincide with bucket boundaries,
the calculated fraction is an estimate, using the same interpolation method as for
`histogram_quantile()`. (See there for more details.) Especially with classic
histograms, it is easy to accidentally pick lower or upper values that are very
far away from any bucket boundary, leading to large margins of error. Rather than
using `histogram_fraction()` with classic histograms, it is often more robust to
act directly on the bucket series when calculating fractions. See the
[calculation of the Apdex score](https://prometheus.io/docs/practices/histograms/#apdex-score)
as a typical example.
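As a sketch of that bucket-based approach (the metric `http_request_duration_seconds` and its `0.3` bucket boundary are illustrative), the fraction of requests taking 300ms or less can be computed directly from the classic bucket series:

```
# fraction of requests <= 0.3s over the last 5 minutes
  sum(rate(http_request_duration_seconds_bucket{le="0.3"}[5m]))
/
  sum(rate(http_request_duration_seconds_count[5m]))
```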
For example, the following expression calculates the fraction of HTTP requests For example, the following expression calculates the fraction of HTTP requests
over the last hour that took 200ms or less: over the last hour that took 200ms or less:
@ -280,8 +293,8 @@ feature inclusive upper boundaries and exclusive lower boundaries for positive
values, and vice versa for negative values.) Without a precise alignment of values, and vice versa for negative values.) Without a precise alignment of
boundaries, the function uses interpolation to estimate the fraction. With the boundaries, the function uses interpolation to estimate the fraction. With the
resulting uncertainty, it becomes irrelevant if the boundaries are inclusive or resulting uncertainty, it becomes irrelevant if the boundaries are inclusive or
exclusive. The interpolation method is the same as the one used for exclusive.
`histogram_quantile()`. See there for more details.
## `histogram_quantile()` ## `histogram_quantile()`
@ -415,9 +428,11 @@ annotation, you should find and remove the source of the invalid data.
## `histogram_stddev()` and `histogram_stdvar()` ## `histogram_stddev()` and `histogram_stdvar()`
`histogram_stddev(v instant-vector)` returns the estimated standard deviation `histogram_stddev(v instant-vector)` returns the estimated standard deviation
of observations for each histogram sample in `v`, based on the geometric mean of observations for each histogram sample in `v`. For this estimation, all observations
of the buckets where the observations lie. Float samples are ignored and do not in a bucket are assumed to have the value of the mean of the bucket boundaries. For
show up in the returned vector. the zero bucket and for buckets with custom boundaries, the arithmetic mean is used.
For the usual exponential buckets, the geometric mean is used. Float samples are ignored
and do not show up in the returned vector.
Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard
variance of observations for each histogram sample in `v`. variance of observations for each histogram sample in `v`.
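A sketch of the estimate implied by this description (our notation, not taken from the Prometheus source; $n_b$ is the observation count in bucket $b$ with boundaries $l_b$ and $u_b$, and the overall mean is assumed to come from the histogram's stored sum and count):

$$
\bar{x} = \frac{\mathrm{sum}}{\mathrm{count}}, \qquad
\widehat{\mathrm{stdvar}} = \frac{1}{\mathrm{count}} \sum_b n_b \,(m_b - \bar{x})^2, \qquad
\widehat{\mathrm{stddev}} = \sqrt{\widehat{\mathrm{stdvar}}}
$$

where $m_b = \sqrt{l_b u_b}$ (geometric mean) for the usual exponential buckets and $m_b = (l_b + u_b)/2$ (arithmetic mean) for the zero bucket and buckets with custom boundaries.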


@ -7,7 +7,7 @@ require (
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v1.0.0 github.com/golang/snappy v1.0.0
github.com/influxdata/influxdb-client-go/v2 v2.14.0 github.com/influxdata/influxdb-client-go/v2 v2.14.0
github.com/prometheus/client_golang v1.21.1 github.com/prometheus/client_golang v1.22.0
github.com/prometheus/common v0.63.0 github.com/prometheus/common v0.63.0
github.com/prometheus/prometheus v1.99.0 github.com/prometheus/prometheus v1.99.0
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
@ -37,7 +37,7 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/compress v1.18.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
@ -51,8 +51,8 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/pdata v1.28.1 // indirect go.opentelemetry.io/collector/pdata v1.30.0 // indirect
go.opentelemetry.io/collector/semconv v0.122.1 // indirect go.opentelemetry.io/collector/semconv v0.124.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect
@ -60,14 +60,14 @@ require (
go.uber.org/atomic v1.11.0 // indirect go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.36.0 // indirect golang.org/x/crypto v0.36.0 // indirect
golang.org/x/net v0.37.0 // indirect golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sys v0.31.0 // indirect golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.23.0 // indirect golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.7.0 // indirect golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
google.golang.org/grpc v1.71.0 // indirect google.golang.org/grpc v1.71.1 // indirect
google.golang.org/protobuf v1.36.5 // indirect google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.32.3 // indirect k8s.io/apimachinery v0.32.3 // indirect


@ -195,8 +195,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -265,8 +265,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -318,10 +318,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector/pdata v1.28.1 h1:ORl5WLpQJvjzBVpHu12lqKMdcf/qDBwRXMcUubhybiQ= go.opentelemetry.io/collector/pdata v1.30.0 h1:j3jyq9um436r6WzWySzexP2nLnFdmL5uVBYAlyr9nDM=
go.opentelemetry.io/collector/pdata v1.28.1/go.mod h1:asKE8MD/4SOKz1mCrGdAz4VO2U2HUNg8A6094uK7pq0= go.opentelemetry.io/collector/pdata v1.30.0/go.mod h1:0Bxu1ktuj4wE7PIASNSvd0SdBscQ1PLtYasymJ13/Cs=
go.opentelemetry.io/collector/semconv v0.122.1 h1:WLzDi3QC4/+LpNMLY90zn5aMDJKyqg/ujW2O4T4sxHg= go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA=
go.opentelemetry.io/collector/semconv v0.122.1/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
@ -365,8 +365,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
@ -423,8 +423,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -432,8 +432,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

go.mod

@ -3,20 +3,20 @@ module github.com/prometheus/prometheus
go 1.23.0 go 1.23.0
require ( require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
github.com/Code-Hex/go-generics-cache v1.5.1 github.com/Code-Hex/go-generics-cache v1.5.1
github.com/KimMachineGun/automemlimit v0.7.1 github.com/KimMachineGun/automemlimit v0.7.1
github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
github.com/aws/aws-sdk-go v1.55.6 github.com/aws/aws-sdk-go v1.55.7
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0 github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0 github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.141.0 github.com/digitalocean/godo v1.144.0
github.com/docker/docker v28.0.2+incompatible github.com/docker/docker v28.1.1+incompatible
github.com/edsrzf/mmap-go v1.2.0 github.com/edsrzf/mmap-go v1.2.0
github.com/envoyproxy/go-control-plane/envoy v1.32.4 github.com/envoyproxy/go-control-plane/envoy v1.32.4
github.com/envoyproxy/protoc-gen-validate v1.2.1 github.com/envoyproxy/protoc-gen-validate v1.2.1
@ -29,40 +29,40 @@ require (
github.com/google/go-cmp v0.7.0 github.com/google/go-cmp v0.7.0
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud/v2 v2.6.0 github.com/gophercloud/gophercloud/v2 v2.7.0
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/hashicorp/consul/api v1.31.2 github.com/hashicorp/consul/api v1.32.0
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec
github.com/hetznercloud/hcloud-go/v2 v2.20.1 github.com/hetznercloud/hcloud-go/v2 v2.21.0
github.com/ionos-cloud/sdk-go/v6 v6.3.2 github.com/ionos-cloud/sdk-go/v6 v6.3.3
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.18.0 github.com/klauspost/compress v1.18.0
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.48.1 github.com/linode/linodego v1.49.0
github.com/miekg/dns v1.1.64 github.com/miekg/dns v1.1.65
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
github.com/oklog/run v1.1.0 github.com/oklog/run v1.1.0
github.com/oklog/ulid/v2 v2.1.0 github.com/oklog/ulid/v2 v2.1.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.122.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1
github.com/ovh/go-ovh v1.7.0 github.com/ovh/go-ovh v1.7.0
github.com/prometheus/alertmanager v0.28.1 github.com/prometheus/alertmanager v0.28.1
github.com/prometheus/client_golang v1.21.1 github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.1 github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.63.0 github.com/prometheus/common v0.63.0
github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/assets v0.2.0
github.com/prometheus/exporter-toolkit v0.14.0 github.com/prometheus/exporter-toolkit v0.14.0
github.com/prometheus/sigv4 v0.1.2 github.com/prometheus/sigv4 v0.1.2
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
github.com/vultr/govultr/v2 v2.17.2 github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/component v1.28.1 go.opentelemetry.io/collector/component v1.30.0
go.opentelemetry.io/collector/consumer v1.28.1 go.opentelemetry.io/collector/consumer v1.30.0
go.opentelemetry.io/collector/pdata v1.28.1 go.opentelemetry.io/collector/pdata v1.30.0
go.opentelemetry.io/collector/processor v0.122.1 go.opentelemetry.io/collector/processor v1.30.0
go.opentelemetry.io/collector/semconv v0.122.1 go.opentelemetry.io/collector/semconv v0.124.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
go.opentelemetry.io/otel v1.35.0 go.opentelemetry.io/otel v1.35.0
@ -76,14 +76,14 @@ require (
go.uber.org/automaxprocs v1.6.0 go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0 go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0 go.uber.org/multierr v1.11.0
golang.org/x/oauth2 v0.28.0 golang.org/x/oauth2 v0.29.0
golang.org/x/sync v0.12.0 golang.org/x/sync v0.13.0
golang.org/x/sys v0.31.0 golang.org/x/sys v0.32.0
golang.org/x/text v0.23.0 golang.org/x/text v0.24.0
google.golang.org/api v0.227.0 google.golang.org/api v0.230.0
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb
google.golang.org/grpc v1.71.0 google.golang.org/grpc v1.72.0
google.golang.org/protobuf v1.36.5 google.golang.org/protobuf v1.36.6
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.3 k8s.io/api v0.32.3
@ -95,22 +95,26 @@ require (
require ( require (
github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
go.opentelemetry.io/collector/featuregate v1.28.0 // indirect go.opentelemetry.io/collector/featuregate v1.30.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.124.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect
go.opentelemetry.io/otel/log v0.11.0 // indirect
) )
require ( require (
cloud.google.com/go/auth v0.15.0 // indirect cloud.google.com/go/auth v0.16.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 // indirect github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect
github.com/containerd/log v0.1.0 // indirect github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@ -157,7 +161,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.1.2 // indirect github.com/knadh/koanf/v2 v2.1.2 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect
@ -176,8 +180,8 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect github.com/morikuni/aec v1.0.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect github.com/oklog/ulid v1.3.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.122.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.122.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
@ -193,19 +197,19 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/confmap v1.28.0 // indirect go.opentelemetry.io/collector/confmap v1.30.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.122.0 // indirect go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect
go.opentelemetry.io/collector/pipeline v0.122.1 // indirect go.opentelemetry.io/collector/pipeline v0.124.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/zap v1.27.0 // indirect go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.36.0 // indirect golang.org/x/crypto v0.37.0 // indirect
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
golang.org/x/mod v0.24.0 // indirect golang.org/x/mod v0.24.0 // indirect
golang.org/x/net v0.38.0 // indirect golang.org/x/net v0.39.0 // indirect
golang.org/x/term v0.30.0 // indirect golang.org/x/term v0.31.0 // indirect
golang.org/x/time v0.11.0 // indirect golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.31.0 // indirect golang.org/x/tools v0.32.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect

go.sum

@ -1,17 +1,17 @@
cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps= cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU=
cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8= cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI=
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 h1:DSDNVxqkoXJiko6x8a90zidoYqnYYa6c1MTzDKzKkTo= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1/go.mod h1:zGqV2R4Cr/k8Uye5w+dgQ06WJtEcbQG/8J7BB6hnCr4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0/go.mod h1:kUjrAo8bgEwLeZ/CmHqNl3Z/kPm7y6FKfxxK0izYUg4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@ -24,8 +24,8 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@ -49,8 +49,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -65,8 +65,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySeApCX4GeOjPl9qhRF3QuIZq+Q= github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@ -80,14 +80,14 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/digitalocean/godo v1.141.0 h1:g4vKl9VZvgzE0EEFv8Y3SYiKqdteKS18BrVPatKFSVE= github.com/digitalocean/godo v1.144.0 h1:rDCsmpwcDe5egFQ3Ae45HTde685/GzX037mWRMPufW0=
github.com/digitalocean/godo v1.141.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/digitalocean/godo v1.144.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8= github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@ -190,16 +190,16 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/gophercloud/gophercloud/v2 v2.6.0 h1:XJKQ0in3iHOZHVAFMXq/OhjCuvvG+BKR0unOqRfG1EI= github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
github.com/gophercloud/gophercloud/v2 v2.6.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hashicorp/consul/api v1.31.2 h1:NicObVJHcCmyOIl7Z9iHPvvFrocgTYo9cITSGg0/7pw= github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
github.com/hashicorp/consul/api v1.31.2/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40= github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@ -251,10 +251,10 @@ github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977Vrm
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.20.1 h1:1wnKY6keRb6ip1kGPAtnU/ugjMxNkQ4tkulS6uc+vfk= github.com/hetznercloud/hcloud-go/v2 v2.21.0 h1:wUpQT+fgAxIcdMtFvuCJ78ziqc/VARubpOQPQyj4Q84=
github.com/hetznercloud/hcloud-go/v2 v2.20.1/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc= github.com/hetznercloud/hcloud-go/v2 v2.21.0/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc=
github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k= github.com/ionos-cloud/sdk-go/v6 v6.3.3 h1:q33Sw1ZqsvqDkFaKG53dGk7BCOvPCPbGZpYqsF6tdjw=
github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/ionos-cloud/sdk-go/v6 v6.3.3/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -272,14 +272,14 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
@ -297,8 +297,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.48.1 h1:Ojw1S+K5jJr1dggO8/H6r4FINxXnJbOU5GkbpaTfmhU= github.com/linode/linodego v1.49.0 h1:MNd3qwvQzbXB5mCpvdCqlUIu1RPA9oC+50LyB9kK+GQ=
github.com/linode/linodego v1.48.1/go.mod h1:fc3t60If8X+yZTFAebhCnNDFrhwQhq9HDU92WnBousQ= github.com/linode/linodego v1.49.0/go.mod h1:B+HAM3//4w1wOS0BwdaQBKwBxlfe6kYJ7bSC6jJ/xtc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -325,8 +325,8 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ= github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck= github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@ -341,6 +341,10 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -369,14 +373,14 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.122.0 h1:zHlrYHCN/uGsdfWnAqFb6iksIQv1Aq9lsSTMe/kDsZ0= github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 h1:jOG1ceAx+IATloKXHsE2Cy88XTgqPB/hiXicOrxENx8=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.122.0/go.mod h1:lG9v3A48Y/jox3y8TdhCuakVTZfslTs+u2lkdhc6LIk= github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1/go.mod h1:mtNCoy09iO1f2zy5bEqkyRfRPaNKea57yK63cfHixts=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.122.0 h1:P6lq+OWqsSdO+o+uTrqu/lko96/MnS+Zc4SqMo3bdvs= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1 h1:G2daAIXiQhAwQSz9RK71QsBH9rmH/m/vdkFuGIEPfS4=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.122.0/go.mod h1:45Di232vetvGjROIPxlBlyBMBAgA95szYP8du09shDE= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1/go.mod h1:/WAA1PKvHNz7E5SrtGg2KfAWl/PrmS0FVYOanoGxk0I=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.122.0 h1:Jsn9I74nG85Iw7wWET6g0eQ9tbwVndgNHbzHqdlZVqI= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 h1:mMVzpkpy6rKL1Q/xXNogZVtWebIlxTRzhsgp3b9ioCM=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.122.0/go.mod h1:BpcyQo7MedcfxlBmIgRB5DxdLlEa0wHRJ/Nhe8jjnW4= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1/go.mod h1:jM8Gsd0fIiwRzWrzd7Gm6PZYi5AgHPRkz0625Rtqyxo=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.122.0 h1:VoVWWmasrx6boiis/OV+HmkEXtVm73LXeZMYHJwEgwE= github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 h1:gmmzhgewk2fU0Md0vmaDEFgfRycfCfjgPvMA4SEdKiU=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.122.0/go.mod h1:DEk8LYKrIZS01fhJXohi4tRR89iEcF3zt0oHDTB2TT0= github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1/go.mod h1:AsQJBuUUY1/yqK2c87hv4deeteaKwktwLIfQCN2OGk4=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@ -409,13 +413,13 @@ github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
@ -435,13 +439,13 @@ github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG
github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU= github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 h1:4+LP7qmsLSGbmc66m1s5dKRMBwztRppfxFKlYqYte/c= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32/go.mod h1:kzh+BSAvpoyHHdHBCDhmSWtBc1NbLMZ2lWHqnBoxFks= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
@ -484,40 +488,44 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector/component v1.28.1 h1:JjwfvLR0UdadRDAANAdM4mOSwGmfGO3va2X+fdk4YdA= go.opentelemetry.io/collector/component v1.30.0 h1:HXjqBHaQ47/EEuWdnkjr4Y3kRWvmyWIDvqa1Q262Fls=
go.opentelemetry.io/collector/component v1.28.1/go.mod h1:jwZRDML3tXo1whueZdRf+y6z3DeEYTLPBmb/O1ujB40= go.opentelemetry.io/collector/component v1.30.0/go.mod h1:vfM9kN+BM6oHBXWibquiprz8CVawxd4/aYy3nbhme3E=
go.opentelemetry.io/collector/component/componentstatus v0.122.1 h1:zMQC0y8ZBITa87GOwEANdOoAox5I4UgaIHxY79nwCbk= go.opentelemetry.io/collector/component/componentstatus v0.124.0 h1:0WHaANNktxLIk+lN+CtgPBESI1MJBrfVW/LvNCbnMQ4=
go.opentelemetry.io/collector/component/componentstatus v0.122.1/go.mod h1:ZYwOgoXyPu4gGqfQ5DeaEpStpUCD/Clctz4rMd9qQYw= go.opentelemetry.io/collector/component/componentstatus v0.124.0/go.mod h1:a/wa8nxJGWOGuLwCN8gHCzFHCaUVZ+VyUYuKz9Yaq38=
go.opentelemetry.io/collector/component/componenttest v0.122.1 h1:HE4oeLub2FWVTUzCQG6SWwfnJfcK1FMknXhGQ2gOxnY= go.opentelemetry.io/collector/component/componenttest v0.124.0 h1:Wsc+DmDrWTFs/aEyjDA3slNwV+h/0NOyIR5Aywvr6Zw=
go.opentelemetry.io/collector/component/componenttest v0.122.1/go.mod h1:o3Xq6z3C0aVhrd/fD56aKxShrILVnHnbgQVP5NoFuic= go.opentelemetry.io/collector/component/componenttest v0.124.0/go.mod h1:NQ4ATOzMFc7QA06B993tq8o27DR0cu/JR/zK7slGJ3E=
go.opentelemetry.io/collector/confmap v1.28.0 h1:pUQh4eOW0YQ1GFWTDP5pw/ZMQuppkz6oSoDDloAH/Sc= go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7YVnyAEN2dAcM=
go.opentelemetry.io/collector/confmap v1.28.0/go.mod h1:k/3fo+2RE6m+OKlJzx78Q8hstABYwYgvXO3u9zyTeHI= go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA=
go.opentelemetry.io/collector/confmap/xconfmap v0.122.0 h1:uRwR2/DEhLCwsdQyD5rTG/cAPUm5ixZb96y3rUaUo/g= go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8=
go.opentelemetry.io/collector/confmap/xconfmap v0.122.0/go.mod h1:76K9ypccfRyorlYYit8O82mX4hseQP8VJ/TYqCKI4fA= go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0=
go.opentelemetry.io/collector/consumer v1.28.1 h1:3lHW2e0i7kEkbDqK1vErA8illqPpwDxMzgc5OUDsJ0Y= go.opentelemetry.io/collector/consumer v1.30.0 h1:Nn6kFTH+EJbv13E0W+sNvWrTgbiFCRv8f6DaA2F1DQs=
go.opentelemetry.io/collector/consumer v1.28.1/go.mod h1:g0T16JPMYFN6T2noh+1YBxJSt5i5Zp+Y0Y6pvkMqsDQ= go.opentelemetry.io/collector/consumer v1.30.0/go.mod h1:edRyfk61ugdhCQ93PBLRZfYMVWjdMPpKP8z5QLyESf0=
go.opentelemetry.io/collector/consumer/consumertest v0.122.1 h1:LKkLMdWwJCuOYyCMVzwc0OG9vncIqpl8Tp9+H8RikNg= go.opentelemetry.io/collector/consumer/consumertest v0.124.0 h1:2arChG4RPrHW3lfVWlK/KDF7Y7qkUm/YAiBXh8oTue0=
go.opentelemetry.io/collector/consumer/consumertest v0.122.1/go.mod h1:pYqWgx62ou3uUn8nlt2ohRyKod+7xLTf/uA3YfRwVkA= go.opentelemetry.io/collector/consumer/consumertest v0.124.0/go.mod h1:Hlu+EXbINHxVAyIT1baKO2d0j5odR3fLlLAiaP+JqQg=
go.opentelemetry.io/collector/consumer/xconsumer v0.122.1 h1:iK1hGbho/XICdBfGb4MnKwF9lnhLmv09yQ4YlVm+LGo= go.opentelemetry.io/collector/consumer/xconsumer v0.124.0 h1:/cut96EWVNoz6lIeGI9+EzS6UClMtnZkx5YIpkD0Xe0=
go.opentelemetry.io/collector/consumer/xconsumer v0.122.1/go.mod h1:xYbRPP1oWcYUUDQJTlv78M/rlYb+qE4weiv++ObZRSU= go.opentelemetry.io/collector/consumer/xconsumer v0.124.0/go.mod h1:fHH/MpzFCRNk/4foiYE6BoXQCAMf5sJTO35uvzVrrd4=
go.opentelemetry.io/collector/featuregate v1.28.0 h1:nkaMw0HyOSxojLwlezF2O/xJ9T/Jo1a0iEetesT9lr0= go.opentelemetry.io/collector/featuregate v1.30.0 h1:mx7+iP/FQnY7KO8qw/xE3Qd1MQkWcU8VgcqLNrJ8EU8=
go.opentelemetry.io/collector/featuregate v1.28.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= go.opentelemetry.io/collector/featuregate v1.30.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
go.opentelemetry.io/collector/pdata v1.28.1 h1:ORl5WLpQJvjzBVpHu12lqKMdcf/qDBwRXMcUubhybiQ= go.opentelemetry.io/collector/internal/telemetry v0.124.0 h1:kzd1/ZYhLj4bt2pDB529mL4rIRrRacemXodFNxfhdWk=
go.opentelemetry.io/collector/pdata v1.28.1/go.mod h1:asKE8MD/4SOKz1mCrGdAz4VO2U2HUNg8A6094uK7pq0= go.opentelemetry.io/collector/internal/telemetry v0.124.0/go.mod h1:ZjXjqV0dJ+6D4XGhTOxg/WHjnhdmXsmwmUSgALea66Y=
go.opentelemetry.io/collector/pdata/pprofile v0.122.1 h1:25Fs0eL/J/M2ZEaVplesbI1H7pYx462zUUVxVOszpOg= go.opentelemetry.io/collector/pdata v1.30.0 h1:j3jyq9um436r6WzWySzexP2nLnFdmL5uVBYAlyr9nDM=
go.opentelemetry.io/collector/pdata/pprofile v0.122.1/go.mod h1:+jSjgb4zRnNmr1R/zgVLVyTVSm9irfGrvGTrk3lDxSE= go.opentelemetry.io/collector/pdata v1.30.0/go.mod h1:0Bxu1ktuj4wE7PIASNSvd0SdBscQ1PLtYasymJ13/Cs=
go.opentelemetry.io/collector/pdata/testdata v0.122.1 h1:9DO8nUUnPAGYMKmrep6wLAfOHprvKY4w/7LpE4jldPQ= go.opentelemetry.io/collector/pdata/pprofile v0.124.0 h1:ZjL9wKqzP4BHj0/F1jfGxs1Va8B7xmYayipZeNVoWJE=
go.opentelemetry.io/collector/pdata/testdata v0.122.1/go.mod h1:hYdNrn8KxFwq1nf44YYRgNhDjJTBzoyEr/Qa26pN0t4= go.opentelemetry.io/collector/pdata/pprofile v0.124.0/go.mod h1:1EN3Gw5LSI4fSVma/Yfv/6nqeuYgRTm1/kmG5nE5Oyo=
go.opentelemetry.io/collector/pipeline v0.122.1 h1:f0uuiDmanVyKwfYo6cWveJsGbLXidV7i+Z7u8QJwWxI= go.opentelemetry.io/collector/pdata/testdata v0.124.0 h1:vY+pWG7CQfzzGSB5+zGYHQOltRQr59Ek9QiPe+rI+NY=
go.opentelemetry.io/collector/pipeline v0.122.1/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= go.opentelemetry.io/collector/pdata/testdata v0.124.0/go.mod h1:lNH48lGhGv4CYk27fJecpsR1zYHmZjKgNrAprwjym0o=
go.opentelemetry.io/collector/processor v0.122.1 h1:AvZvEujq8+FYdJsm9lmAMwuuae5Y2/vKIkOJwsoxsxQ= go.opentelemetry.io/collector/pipeline v0.124.0 h1:hKvhDyH2GPnNO8LGL34ugf36sY7EOXPjBvlrvBhsOdw=
go.opentelemetry.io/collector/processor v0.122.1/go.mod h1:nYKctftba7SbdLml6LxgIrnYRXCShDe2bnNWjTIpF7g= go.opentelemetry.io/collector/pipeline v0.124.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
go.opentelemetry.io/collector/processor/processortest v0.122.1 h1:n4UOx1mq+kLaRiHGsu7vBLq+EGXfzWhSxyFweMjMl54= go.opentelemetry.io/collector/processor v1.30.0 h1:dxmu+sO6MzQydyrf2CON5Hm1KU7yV4ofH1stmreUtPk=
go.opentelemetry.io/collector/processor/processortest v0.122.1/go.mod h1:8/NRWx18tNJMBwCQ8/YPWr4qsFUrwk27qE7/dXoJb1M= go.opentelemetry.io/collector/processor v1.30.0/go.mod h1:DjXAgelT8rfIWCTJP5kiPpxPqz4JLE1mJwsE2kJMTk8=
go.opentelemetry.io/collector/processor/xprocessor v0.122.1 h1:Wfv4/7n4YK1HunAVTMS6yf0xmDjCkftJ6EECNcSwzfs= go.opentelemetry.io/collector/processor/processortest v0.124.0 h1:qcyo0dSWmgpNFxjObsKk3Rd/wWV8CkMevd+jApkTQWE=
go.opentelemetry.io/collector/processor/xprocessor v0.122.1/go.mod h1:9zMW3NQ9+DzcJ1cUq5BhZg3ajoUEMGhNY0ZdYjpX+VI= go.opentelemetry.io/collector/processor/processortest v0.124.0/go.mod h1:1YDTxd4c/uVU3Ui1+AzvYW94mo5DbhNmB1xSof6zvD0=
go.opentelemetry.io/collector/semconv v0.122.1 h1:WLzDi3QC4/+LpNMLY90zn5aMDJKyqg/ujW2O4T4sxHg= go.opentelemetry.io/collector/processor/xprocessor v0.124.0 h1:KAe8gIje8TcB8varZ4PDy0HV5xX5rNdaQ7q46BE915w=
go.opentelemetry.io/collector/semconv v0.122.1/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= go.opentelemetry.io/collector/processor/xprocessor v0.124.0/go.mod h1:ItJBBlR6/141vg1v4iRrcsBrGjPCgmXAztxS2x2YkdI=
go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA=
go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU=
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0/go.mod h1:CosX/aS4eHnG9D7nESYpV753l4j9q5j3SL/PUYd2lR8= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0/go.mod h1:CosX/aS4eHnG9D7nESYpV753l4j9q5j3SL/PUYd2lR8=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
@ -530,6 +538,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y=
go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
@ -555,8 +565,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -573,18 +583,18 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -610,17 +620,17 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -629,22 +639,22 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.227.0 h1:QvIHF9IuyG6d6ReE+BNd11kIB8hZvjN8Z5xY5t21zYc= google.golang.org/api v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM=
google.golang.org/api v0.227.0/go.mod h1:EIpaG6MbTgQarWF5xJvX0eOJPK9n/5D4Bynb9j2HXvQ= google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c= google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -3,37 +3,37 @@ module github.com/prometheus/prometheus/internal/tools
go 1.23.0 go 1.23.0
require ( require (
github.com/bufbuild/buf v1.50.1 github.com/bufbuild/buf v1.51.0
github.com/daixiang0/gci v0.13.6 github.com/daixiang0/gci v0.13.6
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
) )
require ( require (
buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.4-20250121211742-6d880cc6cc8d.1 // indirect buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.5-20250121211742-6d880cc6cc8d.1 // indirect
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20241127180247-a33202765966.1 // indirect buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.5-20250307204501-0409229c3780.1 // indirect
buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1 // indirect buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1 // indirect
buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.4-20250116203702-1c024d64352b.1 // indirect buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.5-20250116203702-1c024d64352b.1 // indirect
buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1 // indirect buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1 // indirect
buf.build/go/bufplugin v0.7.0 // indirect buf.build/go/bufplugin v0.8.0 // indirect
buf.build/go/protoyaml v0.3.1 // indirect buf.build/go/protoyaml v0.3.1 // indirect
buf.build/go/spdx v0.2.0 // indirect buf.build/go/spdx v0.2.0 // indirect
cel.dev/expr v0.19.2 // indirect cel.dev/expr v0.21.2 // indirect
connectrpc.com/connect v1.18.1 // indirect connectrpc.com/connect v1.18.1 // indirect
connectrpc.com/otelconnect v0.7.1 // indirect connectrpc.com/otelconnect v0.7.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/bufbuild/protocompile v0.14.1 // indirect github.com/bufbuild/protocompile v0.14.1 // indirect
github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9 // indirect github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9 // indirect
github.com/bufbuild/protovalidate-go v0.8.2 // indirect github.com/bufbuild/protovalidate-go v0.9.3-0.20250317160558-38a17488914d // indirect
github.com/containerd/log v0.1.0 // indirect github.com/containerd/log v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/distribution/reference v0.6.0 // indirect github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v27.5.1+incompatible // indirect github.com/docker/cli v27.5.1+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v28.0.0+incompatible // indirect github.com/docker/docker v28.0.2+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect
@ -73,7 +73,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/profile v1.7.0 // indirect github.com/pkg/profile v1.7.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.50.0 // indirect github.com/quic-go/quic-go v0.50.1 // indirect
github.com/rs/cors v1.11.1 // indirect github.com/rs/cors v1.11.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/segmentio/asm v1.2.0 // indirect github.com/segmentio/asm v1.2.0 // indirect
@ -100,7 +100,7 @@ require (
golang.org/x/crypto v0.36.0 // indirect golang.org/x/crypto v0.36.0 // indirect
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect
golang.org/x/mod v0.24.0 // indirect golang.org/x/mod v0.24.0 // indirect
golang.org/x/net v0.37.0 // indirect golang.org/x/net v0.38.0 // indirect
golang.org/x/sync v0.12.0 // indirect golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect golang.org/x/term v0.30.0 // indirect

View File

@ -1,25 +1,25 @@
buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.4-20250121211742-6d880cc6cc8d.1 h1:p5SFT60M93aMQhOz81VH3kPg8t1pp/Litae/1eSxie4= buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.5-20250121211742-6d880cc6cc8d.1 h1:z/NYWpgoeKkKL3+LYF+8QK58Rjz3qkMAshpdzJTaJ7o=
buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.4-20250121211742-6d880cc6cc8d.1/go.mod h1:umI0o7WWHv8lCbLjYUMzfjHKjyaIt2D89sIj1D9fqy0= buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.5-20250121211742-6d880cc6cc8d.1/go.mod h1:LpnZWZGTs6IBCnY9WHAkR9X4/NbpL5nwOXivQdXILTs=
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20241127180247-a33202765966.1 h1:yeaeyw0RQUe009ebxBQ3TsqBPptiNEGsiS10t+8Htuo= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.5-20250307204501-0409229c3780.1 h1:j+l4+E1EEo83GVIxuqinfFOTyImSQUH90WfufE86xaI=
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20241127180247-a33202765966.1/go.mod h1:novQBstnxcGpfKf8qGRATqn1anQKwMJIbH5Q581jibU= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.5-20250307204501-0409229c3780.1/go.mod h1:eOqrCVUfhh7SLo00urDe/XhJHljj0dWMZirS0aX7cmc=
buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1 h1:1SDs5tEGoWWv2vmKLx2B0Bp+yfhlxiU4DaZUII8+Pvs= buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1 h1:1SDs5tEGoWWv2vmKLx2B0Bp+yfhlxiU4DaZUII8+Pvs=
buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1/go.mod h1:o2AgVM1j3MczvxnMqfZTpiqGwK1VD4JbEagseY0QcjE= buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1/go.mod h1:o2AgVM1j3MczvxnMqfZTpiqGwK1VD4JbEagseY0QcjE=
buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.4-20250116203702-1c024d64352b.1 h1:uKJgSNHvwQUZ6+0dSnx9MtkZ+h/ORbkKym0rlzIjUSI= buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.5-20250116203702-1c024d64352b.1 h1:MTNYELBBDEj2ddEwWb/vuAm5PRLyWtZe7CLnc6WZ5qQ=
buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.4-20250116203702-1c024d64352b.1/go.mod h1:Ua59W2s7uwPS5sGNgW08QewjBaPnUxOdpkWsuDvJ36Q= buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.5-20250116203702-1c024d64352b.1/go.mod h1:E8bt4dG1/NfuocvVmlDNWIfKoLK0B4AgGq4ubwEGBvo=
buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1 h1:XmYgi9W/9oST2ZrfT3ucGWkzD9+Vd0ls9yhyZ8ae0KQ= buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1 h1:XmYgi9W/9oST2ZrfT3ucGWkzD9+Vd0ls9yhyZ8ae0KQ=
buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1/go.mod h1:cxFpqWIC80Wm8YNo1038ocBmrF84uQ0IfL0uVdAu9ZY= buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1/go.mod h1:cxFpqWIC80Wm8YNo1038ocBmrF84uQ0IfL0uVdAu9ZY=
buf.build/go/bufplugin v0.7.0 h1:Tq8FXBVfpMxhl3QR6P/gMQHROg1Ss7WhpyD4QVV61ds= buf.build/go/bufplugin v0.8.0 h1:YgR1+CNGmzR69jt85oRWTa5FioZoX/tOrHV+JxfNnnk=
buf.build/go/bufplugin v0.7.0/go.mod h1:LuQzv36Ezu2zQIQUtwg4WJJFe58tXn1anL1IosAh6ik= buf.build/go/bufplugin v0.8.0/go.mod h1:rcm0Esd3P/GM2rtYTvz3+9Gf8w9zdo7rG8dKSxYHHIE=
buf.build/go/protoyaml v0.3.1 h1:ucyzE7DRnjX+mQ6AH4JzN0Kg50ByHHu+yrSKbgQn2D4= buf.build/go/protoyaml v0.3.1 h1:ucyzE7DRnjX+mQ6AH4JzN0Kg50ByHHu+yrSKbgQn2D4=
buf.build/go/protoyaml v0.3.1/go.mod h1:0TzNpFQDXhwbkXb/ajLvxIijqbve+vMQvWY/b3/Dzxg= buf.build/go/protoyaml v0.3.1/go.mod h1:0TzNpFQDXhwbkXb/ajLvxIijqbve+vMQvWY/b3/Dzxg=
buf.build/go/spdx v0.2.0 h1:IItqM0/cMxvFJJumcBuP8NrsIzMs/UYjp/6WSpq8LTw= buf.build/go/spdx v0.2.0 h1:IItqM0/cMxvFJJumcBuP8NrsIzMs/UYjp/6WSpq8LTw=
buf.build/go/spdx v0.2.0/go.mod h1:bXdwQFem9Si3nsbNy8aJKGPoaPi5DKwdeEp5/ArZ6w8= buf.build/go/spdx v0.2.0/go.mod h1:bXdwQFem9Si3nsbNy8aJKGPoaPi5DKwdeEp5/ArZ6w8=
cel.dev/expr v0.19.2 h1:V354PbqIXr9IQdwy4SYA4xa0HXaWq1BUPAGzugBY5V4= cel.dev/expr v0.21.2 h1:o+Wj235dy4gFYlYin3JsMpp3EEfMrPm/6tdoyjT98S0=
cel.dev/expr v0.19.2/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cel.dev/expr v0.21.2/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw= connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8= connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
connectrpc.com/otelconnect v0.7.1 h1:scO5pOb0i4yUE66CnNrHeK1x51yq0bE0ehPg6WvzXJY= connectrpc.com/otelconnect v0.7.2 h1:WlnwFzaW64dN06JXU+hREPUGeEzpz3Acz2ACOmN8cMI=
connectrpc.com/otelconnect v0.7.1/go.mod h1:dh3bFgHBTb2bkqGCeVVOtHJreSns7uu9wwL2Tbz17ms= connectrpc.com/otelconnect v0.7.2/go.mod h1:JS7XUKfuJs2adhCnXhNHPHLz6oAaZniCJdSF00OZSew=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
@ -28,14 +28,14 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/bufbuild/buf v1.50.1 h1:3sEaWLw6g7bSIJ+yKo6ERF3qpkaLNGd8SzImFpA5gUI= github.com/bufbuild/buf v1.51.0 h1:k2we7gmuSDeIqxkv16F/8s5Kk0l2ZfvMHpvC1n6o5Rk=
github.com/bufbuild/buf v1.50.1/go.mod h1:LqTlfsFs4RD3L+VoBudEWJzWi12Pa0+Q2vDQnY0YQv0= github.com/bufbuild/buf v1.51.0/go.mod h1:TbX4Df3BfE0Lugd3Y3sFr7QTxqmCfPkuiEexe29KZeE=
github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=
github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9 h1:kAWER21DzhzU7ys8LL1WkSfbGkwXv+tM30hyEsYrW2k= github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9 h1:kAWER21DzhzU7ys8LL1WkSfbGkwXv+tM30hyEsYrW2k=
github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9/go.mod h1:c5D8gWRIZ2HLWO3gXYTtUfw/hbJyD8xikv2ooPxnklQ= github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9/go.mod h1:c5D8gWRIZ2HLWO3gXYTtUfw/hbJyD8xikv2ooPxnklQ=
github.com/bufbuild/protovalidate-go v0.8.2 h1:sgzXHkHYP6HnAsL2Rd3I1JxkYUyEQUv9awU1PduMxbM= github.com/bufbuild/protovalidate-go v0.9.3-0.20250317160558-38a17488914d h1:Y6Yp/LwSaRG8gw9GyyQD7jensL9NXqPlkbuulaAvCEE=
github.com/bufbuild/protovalidate-go v0.8.2/go.mod h1:K6w8iPNAXBoIivVueSELbUeUl+MmeTQfCDSug85pn3M= github.com/bufbuild/protovalidate-go v0.9.3-0.20250317160558-38a17488914d/go.mod h1:SZN6Qr3lPWuKMoQtIhKdhESkb+3m2vk0lqN9WMuZDDU=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
@ -68,16 +68,16 @@ github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3L
github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.0.0+incompatible h1:Olh0KS820sJ7nPsBKChVhk5pzqcwDR15fumfAd/p9hM= github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8=
github.com/docker/docker v28.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
@ -184,8 +184,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo= github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E= github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
@ -272,8 +272,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@ -32,8 +32,8 @@ func (ls Labels) Len() int { return len(ls) }
func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
// Bytes returns ls as a byte slice. // Bytes returns an opaque, not-human-readable encoding of ls, usable as a map key.
// It uses an invalid byte character as a separator and so should not be used for printing. // Encoding may change over time or between runs of Prometheus.
func (ls Labels) Bytes(buf []byte) []byte { func (ls Labels) Bytes(buf []byte) []byte {
b := bytes.NewBuffer(buf[:0]) b := bytes.NewBuffer(buf[:0])
b.WriteByte(labelSep) b.WriteByte(labelSep)

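Since Bytes is now documented as an opaque map-key encoding rather than printable output, the natural use is deduplicating or counting label sets while reusing a scratch buffer. A minimal sketch of that pattern, assuming the github.com/prometheus/prometheus/model/labels package (the seen map and the label values are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	seen := map[string]int{} // keyed by the opaque Bytes encoding
	var buf []byte           // scratch buffer reused across calls

	for _, ls := range []labels.Labels{
		labels.FromStrings("job", "api", "instance", "a"),
		labels.FromStrings("job", "api", "instance", "b"),
		labels.FromStrings("job", "api", "instance", "a"), // duplicate set
	} {
		buf = ls.Bytes(buf)
		seen[string(buf)]++ // string(buf) copies, so reusing buf is safe
	}
	fmt.Println(len(seen)) // 2 distinct label sets
}

Because the encoding may change between runs of Prometheus, keys built this way belong in memory only and should never be persisted.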
View File

@ -140,8 +140,8 @@ func decodeString(t *nameTable, data string, index int) (string, int) {
return t.ToName(num), index return t.ToName(num), index
} }
// Bytes returns ls as a byte slice. // Bytes returns an opaque, not-human-readable encoding of ls, usable as a map key.
// It uses non-printing characters and so should not be used for printing. // Encoding may change over time or between runs of Prometheus.
func (ls Labels) Bytes(buf []byte) []byte { func (ls Labels) Bytes(buf []byte) []byte {
b := bytes.NewBuffer(buf[:0]) b := bytes.NewBuffer(buf[:0])
for i := 0; i < len(ls.data); { for i := 0; i < len(ls.data); {

View File

@ -24,31 +24,25 @@ import (
) )
// Labels is implemented by a single flat string holding name/value pairs. // Labels is implemented by a single flat string holding name/value pairs.
// Each name and value is preceded by its length in varint encoding. // Each name and value is preceded by its length, encoded as a single byte
// for sizes 0-254, or by the following 3 bytes little-endian if the first byte is 255.
// Maximum length allowed is 2^24 bytes, i.e. 16MB.
// Names are in order. // Names are in order.
type Labels struct { type Labels struct {
data string data string
} }
func decodeSize(data string, index int) (int, int) { func decodeSize(data string, index int) (int, int) {
// Fast-path for common case of a single byte, value 0..127.
b := data[index] b := data[index]
index++ index++
if b < 0x80 { if b == 255 {
return int(b), index // Larger numbers are encoded as 3 bytes little-endian.
}
size := int(b & 0x7F)
for shift := uint(7); ; shift += 7 {
// Just panic if we go off the end of data, since all Labels strings are constructed internally and // Just panic if we go off the end of data, since all Labels strings are constructed internally and
// malformed data indicates a bug or memory corruption. // malformed data indicates a bug or memory corruption.
b := data[index] return int(data[index]) + (int(data[index+1]) << 8) + (int(data[index+2]) << 16), index + 3
index++
size |= int(b&0x7F) << shift
if b < 0x80 {
break
} }
} // More common case of a single byte, value 0..254.
return size, index return int(b), index
} }
func decodeString(data string, index int) (string, int) { func decodeString(data string, index int) (string, int) {
@ -57,8 +51,8 @@ func decodeString(data string, index int) (string, int) {
return data[index : index+size], index + size return data[index : index+size], index + size
} }
// Bytes returns ls as a byte slice. // Bytes returns an opaque, not-human-readable encoding of ls, usable as a map key.
// It uses non-printing characters and so should not be used for printing. // Encoding may change over time or between runs of Prometheus.
func (ls Labels) Bytes(buf []byte) []byte { func (ls Labels) Bytes(buf []byte) []byte {
if cap(buf) < len(ls.data) { if cap(buf) < len(ls.data) {
buf = make([]byte, len(ls.data)) buf = make([]byte, len(ls.data))
@ -528,48 +522,27 @@ func marshalLabelToSizedBuffer(m *Label, data []byte) int {
return len(data) - i return len(data) - i
} }
func sizeVarint(x uint64) (n int) { func sizeWhenEncoded(x uint64) (n int) {
// Most common case first if x < 255 {
if x < 1<<7 {
return 1 return 1
} else if x <= 1<<24 {
return 4
} }
if x >= 1<<56 { panic("String too long to encode as label.")
return 9
}
if x >= 1<<28 {
x >>= 28
n = 4
}
if x >= 1<<14 {
x >>= 14
n += 2
}
if x >= 1<<7 {
n++
}
return n + 1
} }
func encodeVarint(data []byte, offset int, v uint64) int {
offset -= sizeVarint(v)
base := offset
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return base
}
// Special code for the common case that a size is less than 128.
func encodeSize(data []byte, offset, v int) int { func encodeSize(data []byte, offset, v int) int {
if v < 1<<7 { if v < 255 {
offset-- offset--
data[offset] = uint8(v) data[offset] = uint8(v)
return offset return offset
} }
return encodeVarint(data, offset, uint64(v)) offset -= 4
data[offset] = 255
data[offset+1] = byte(v)
data[offset+2] = byte((v >> 8))
data[offset+3] = byte((v >> 16))
return offset
} }
func labelsSize(lbls []Label) (n int) { func labelsSize(lbls []Label) (n int) {
@ -583,9 +556,9 @@ func labelsSize(lbls []Label) (n int) {
func labelSize(m *Label) (n int) { func labelSize(m *Label) (n int) {
// strings are encoded as length followed by contents. // strings are encoded as length followed by contents.
l := len(m.Name) l := len(m.Name)
n += l + sizeVarint(uint64(l)) n += l + sizeWhenEncoded(uint64(l))
l = len(m.Value) l = len(m.Value)
n += l + sizeVarint(uint64(l)) n += l + sizeWhenEncoded(uint64(l))
return n return n
} }

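To make the new length prefix concrete, here is a standalone round-trip sketch of the scheme described in the comment above; it is an illustration, not the package's internal helpers (which write the prefix backwards into a pre-sized buffer). Lengths 0-254 occupy a single byte; anything longer gets a 255 marker followed by the length as 3 little-endian bytes.

package main

import "fmt"

// encodeSize appends the length prefix used by the stringlabels layout:
// one byte for 0..254, or 0xFF plus 3 little-endian bytes otherwise.
func encodeSize(dst []byte, v int) []byte {
	if v < 255 {
		return append(dst, byte(v))
	}
	return append(dst, 255, byte(v), byte(v>>8), byte(v>>16))
}

// decodeSize reads a length prefix back, returning the size and the
// index just past the prefix.
func decodeSize(data []byte, index int) (int, int) {
	b := data[index]
	index++
	if b == 255 {
		return int(data[index]) | int(data[index+1])<<8 | int(data[index+2])<<16, index + 3
	}
	return int(b), index
}

func main() {
	for _, n := range []int{0, 1, 254, 255, 65536} {
		buf := encodeSize(nil, n)
		got, next := decodeSize(buf, 0)
		fmt.Printf("n=%d: %d prefix byte(s), decoded=%d ok=%v\n", n, next, got, got == n)
	}
}

Compared to the varint scheme it replaces, this trades a few extra bytes on long strings for a two-case decoder with no loop; the single-byte case covers virtually every real-world label name and value.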
View File

@ -27,6 +27,8 @@ import (
) )
func TestLabels_String(t *testing.T) { func TestLabels_String(t *testing.T) {
s254 := strings.Repeat("x", 254) // Edge cases for stringlabels encoding.
s255 := strings.Repeat("x", 255)
cases := []struct { cases := []struct {
labels Labels labels Labels
expected string expected string
@ -43,6 +45,14 @@ func TestLabels_String(t *testing.T) {
labels: FromStrings("service.name", "t1", "whatever\\whatever", "t2"), labels: FromStrings("service.name", "t1", "whatever\\whatever", "t2"),
expected: `{"service.name"="t1", "whatever\\whatever"="t2"}`, expected: `{"service.name"="t1", "whatever\\whatever"="t2"}`,
}, },
{
labels: FromStrings("aaa", "111", "xx", s254),
expected: `{aaa="111", xx="` + s254 + `"}`,
},
{
labels: FromStrings("aaa", "111", "xx", s255),
expected: `{aaa="111", xx="` + s255 + `"}`,
},
} }
for _, c := range cases { for _, c := range cases {
str := c.labels.String() str := c.labels.String()

136
promql/durations.go Normal file
View File

@ -0,0 +1,136 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql

import (
    "fmt"
    "math"
    "time"

    "github.com/prometheus/prometheus/promql/parser"
)
// durationVisitor is a parser visitor that resolves duration expressions in the AST into concrete durations.
type durationVisitor struct{}
func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visitor, error) {
switch n := node.(type) {
case *parser.VectorSelector:
if n.OriginalOffsetExpr != nil {
duration, err := calculateDuration(n.OriginalOffsetExpr, true)
if err != nil {
return nil, err
}
n.OriginalOffset = duration
}
case *parser.MatrixSelector:
if n.RangeExpr != nil {
duration, err := calculateDuration(n.RangeExpr, false)
if err != nil {
return nil, err
}
n.Range = duration
}
case *parser.SubqueryExpr:
if n.OriginalOffsetExpr != nil {
duration, err := calculateDuration(n.OriginalOffsetExpr, true)
if err != nil {
return nil, err
}
n.OriginalOffset = duration
}
if n.StepExpr != nil {
duration, err := calculateDuration(n.StepExpr, false)
if err != nil {
return nil, err
}
n.Step = duration
}
if n.RangeExpr != nil {
duration, err := calculateDuration(n.RangeExpr, false)
if err != nil {
return nil, err
}
n.Range = duration
}
}
return v, nil
}
// calculateDuration computes the duration from a duration expression.
func calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) {
duration, err := evaluateDurationExpr(expr)
if err != nil {
return 0, err
}
if duration <= 0 && !allowedNegative {
return 0, fmt.Errorf("%d:%d: duration must be greater than 0", expr.PositionRange().Start, expr.PositionRange().End)
}
if duration > 1<<63-1 || duration < -1<<63 {
return 0, fmt.Errorf("%d:%d: duration is out of range", expr.PositionRange().Start, expr.PositionRange().End)
}
return time.Duration(duration*1000) * time.Millisecond, nil
}
// evaluateDurationExpr recursively evaluates a duration expression to a float64 value.
func evaluateDurationExpr(expr parser.Expr) (float64, error) {
switch n := expr.(type) {
case *parser.NumberLiteral:
return n.Val, nil
case *parser.DurationExpr:
var lhs, rhs float64
var err error
if n.LHS != nil {
lhs, err = evaluateDurationExpr(n.LHS)
if err != nil {
return 0, err
}
}
rhs, err = evaluateDurationExpr(n.RHS)
if err != nil {
return 0, err
}
switch n.Op {
case parser.ADD:
return lhs + rhs, nil
case parser.SUB:
if n.LHS == nil {
// Unary negative duration expression.
return -rhs, nil
}
return lhs - rhs, nil
case parser.MUL:
return lhs * rhs, nil
case parser.DIV:
if rhs == 0 {
return 0, fmt.Errorf("%d:%d: division by zero", expr.PositionRange().Start, expr.PositionRange().End)
}
return lhs / rhs, nil
case parser.MOD:
if rhs == 0 {
return 0, fmt.Errorf("%d:%d: modulo by zero", expr.PositionRange().Start, expr.PositionRange().End)
}
return math.Mod(lhs, rhs), nil
case parser.POW:
return math.Pow(lhs, rhs), nil
default:
return 0, fmt.Errorf("unexpected duration expression operator %q", n.Op)
}
default:
return 0, fmt.Errorf("unexpected duration expression type %T", n)
}
}
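End to end, this file plugs into query preprocessing: the parser leaves RangeExpr/StepExpr/OriginalOffsetExpr nodes in the AST, and the visitor above collapses them into plain durations. A usage sketch against the exported API introduced in this commit (the metric name and query are illustrative, and the experimental flag must be switched on first):

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/prometheus/promql"
    "github.com/prometheus/prometheus/promql/parser"
)

func main() {
    // Duration expressions are gated behind an experimental flag (see parse.go).
    parser.ExperimentalDurationExpr = true

    expr, err := parser.ParseExpr(`rate(http_requests_total[5m*2])`)
    if err != nil {
        panic(err)
    }

    // PreprocessExpr now also runs the durationVisitor, which is why it
    // gained an error return in this commit.
    now := time.Now()
    expr, err = promql.PreprocessExpr(expr, now, now)
    if err != nil {
        panic(err)
    }
    fmt.Println(expr) // The selector's Range field is now 10m internally.
}

Note the printed form keeps the original expression; only the internal Range/Offset/Step fields are materialized.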

promql/durations_test.go (new file)

@ -0,0 +1,238 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql

import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/promql/parser"
)
func TestDurationVisitor(t *testing.T) {
// Enable experimental duration expression parsing.
parser.ExperimentalDurationExpr = true
t.Cleanup(func() {
parser.ExperimentalDurationExpr = false
})
complexExpr := `sum_over_time(
rate(metric[5m] offset 1h)[10m:30s] offset 2h
) +
avg_over_time(
metric[1h + 30m] offset -1h
) *
count_over_time(
metric[2h * 0.5]
)`
expr, err := parser.ParseExpr(complexExpr)
require.NoError(t, err)
err = parser.Walk(&durationVisitor{}, expr, nil)
require.NoError(t, err)
// Verify different parts of the expression have correct durations.
// This is a binary expression at the top level.
binExpr, ok := expr.(*parser.BinaryExpr)
require.True(t, ok, "Expected binary expression at top level")
// Left side should be sum_over_time with subquery.
leftCall, ok := binExpr.LHS.(*parser.Call)
require.True(t, ok, "Expected call expression on left side")
require.Equal(t, "sum_over_time", leftCall.Func.Name)
// Extract the subquery from sum_over_time.
sumSubquery, ok := leftCall.Args[0].(*parser.SubqueryExpr)
require.True(t, ok, "Expected subquery in sum_over_time")
require.Equal(t, 10*time.Minute, sumSubquery.Range)
require.Equal(t, 30*time.Second, sumSubquery.Step)
require.Equal(t, 2*time.Hour, sumSubquery.OriginalOffset)
// Extract the rate call inside the subquery.
rateCall, ok := sumSubquery.Expr.(*parser.Call)
require.True(t, ok, "Expected rate call in subquery")
require.Equal(t, "rate", rateCall.Func.Name)
// Extract the matrix selector from rate.
rateMatrix, ok := rateCall.Args[0].(*parser.MatrixSelector)
require.True(t, ok, "Expected matrix selector in rate")
require.Equal(t, 5*time.Minute, rateMatrix.Range)
require.Equal(t, 1*time.Hour, rateMatrix.VectorSelector.(*parser.VectorSelector).OriginalOffset)
// Right side should be another binary expression (multiplication).
rightBinExpr, ok := binExpr.RHS.(*parser.BinaryExpr)
require.True(t, ok, "Expected binary expression on right side")
// Left side of multiplication should be avg_over_time.
avgCall, ok := rightBinExpr.LHS.(*parser.Call)
require.True(t, ok, "Expected call expression on left side of multiplication")
require.Equal(t, "avg_over_time", avgCall.Func.Name)
// Extract the matrix selector from avg_over_time.
avgMatrix, ok := avgCall.Args[0].(*parser.MatrixSelector)
require.True(t, ok, "Expected matrix selector in avg_over_time")
require.Equal(t, 90*time.Minute, avgMatrix.Range) // 1h + 30m
require.Equal(t, -1*time.Hour, avgMatrix.VectorSelector.(*parser.VectorSelector).OriginalOffset)
// Right side of multiplication should be count_over_time.
countCall, ok := rightBinExpr.RHS.(*parser.Call)
require.True(t, ok, "Expected call expression on right side of multiplication")
require.Equal(t, "count_over_time", countCall.Func.Name)
// Extract the matrix selector from count_over_time.
countMatrix, ok := countCall.Args[0].(*parser.MatrixSelector)
require.True(t, ok, "Expected matrix selector in count_over_time")
require.Equal(t, 1*time.Hour, countMatrix.Range) // 2h * 0.5
}
func TestCalculateDuration(t *testing.T) {
tests := []struct {
name string
expr parser.Expr
expected time.Duration
errorMessage string
allowedNegative bool
}{
{
name: "addition",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 5},
RHS: &parser.NumberLiteral{Val: 10},
Op: parser.ADD,
},
expected: 15 * time.Second,
},
{
name: "subtraction",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 15},
RHS: &parser.NumberLiteral{Val: 5},
Op: parser.SUB,
},
expected: 10 * time.Second,
},
{
name: "subtraction with negative",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 5},
RHS: &parser.NumberLiteral{Val: 10},
Op: parser.SUB,
},
errorMessage: "duration must be greater than 0",
},
{
name: "multiplication",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 5},
RHS: &parser.NumberLiteral{Val: 3},
Op: parser.MUL,
},
expected: 15 * time.Second,
},
{
name: "division",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 15},
RHS: &parser.NumberLiteral{Val: 3},
Op: parser.DIV,
},
expected: 5 * time.Second,
},
{
name: "modulo with numbers",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 17},
RHS: &parser.NumberLiteral{Val: 5},
Op: parser.MOD,
},
expected: 2 * time.Second,
},
{
name: "power",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 2},
RHS: &parser.NumberLiteral{Val: 3},
Op: parser.POW,
},
expected: 8 * time.Second,
},
{
name: "complex expression",
expr: &parser.DurationExpr{
LHS: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 2},
RHS: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 3},
RHS: &parser.NumberLiteral{Val: 4},
Op: parser.ADD,
},
Op: parser.MUL,
},
RHS: &parser.NumberLiteral{Val: 1},
Op: parser.SUB,
},
expected: 13 * time.Second,
},
{
name: "unary negative",
expr: &parser.DurationExpr{
RHS: &parser.NumberLiteral{Val: 5},
Op: parser.SUB,
},
expected: -5 * time.Second,
allowedNegative: true,
},
{
name: "division by zero",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 5},
RHS: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 5},
RHS: &parser.NumberLiteral{Val: 5},
Op: parser.SUB,
},
Op: parser.DIV,
},
errorMessage: "division by zero",
},
{
name: "modulo by zero",
expr: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 5},
RHS: &parser.DurationExpr{
LHS: &parser.NumberLiteral{Val: 5},
RHS: &parser.NumberLiteral{Val: 5},
Op: parser.SUB,
},
Op: parser.MOD,
},
errorMessage: "modulo by zero",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := calculateDuration(tt.expr, tt.allowedNegative)
if tt.errorMessage != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.errorMessage)
return
}
require.NoError(t, err)
require.Equal(t, tt.expected, result)
})
}
}


@@ -489,9 +489,9 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts
    if err := ng.validateOpts(expr); err != nil {
        return nil, err
    }
-    *pExpr = PreprocessExpr(expr, ts, ts)
-    return qry, nil
+    *pExpr, err = PreprocessExpr(expr, ts, ts)
+    return qry, err
}

// NewRangeQuery returns an evaluation query for the given time range and with

@@ -513,9 +513,9 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q
    if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar {
        return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type()))
    }
-    *pExpr = PreprocessExpr(expr, start, end)
-    return qry, nil
+    *pExpr, err = PreprocessExpr(expr, start, end)
+    return qry, err
}

func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) {

@@ -1137,8 +1137,9 @@ type EvalNodeHelper struct {
    Out Vector

    // Caches.
-    // funcHistogramQuantile for classic histograms.
+    // funcHistogramQuantile and funcHistogramFraction for classic histograms.
    signatureToMetricWithBuckets map[string]*metricWithBuckets
+    nativeHistogramSamples       []Sample

    lb     *labels.Builder
    lblBuf []byte

@@ -1161,6 +1162,62 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
    }
}
// resetHistograms prepares the histogram caches by splitting the given vector into native and classic histograms.
func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annotations.Annotations {
var annos annotations.Annotations
if enh.signatureToMetricWithBuckets == nil {
enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}
} else {
for _, v := range enh.signatureToMetricWithBuckets {
v.buckets = v.buckets[:0]
}
}
enh.nativeHistogramSamples = enh.nativeHistogramSamples[:0]
for _, sample := range inVec {
// We are only looking for classic buckets here. Remember
// the histograms for later treatment.
if sample.H != nil {
enh.nativeHistogramSamples = append(enh.nativeHistogramSamples, sample)
continue
}
upperBound, err := strconv.ParseFloat(
sample.Metric.Get(model.BucketLabel), 64,
)
if err != nil {
annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), arg.PositionRange()))
continue
}
enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]
if !ok {
sample.Metric = labels.NewBuilder(sample.Metric).
Del(excludedLabels...).
Labels()
mb = &metricWithBuckets{sample.Metric, nil}
enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
}
mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F})
}
for _, sample := range enh.nativeHistogramSamples {
// We have to reconstruct the exact same signature as above for
// a classic histogram, just ignoring any le label.
enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {
// At this data point, we have classic histogram
// buckets and a native histogram with the same name and
// labels. Do not evaluate anything.
annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange()))
delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
continue
}
}
return annos
}
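The keying step above deserves a note: classic-histogram series are grouped by their label set with the le label stripped, so every bucket of one histogram maps to the same entry, and a native histogram whose full label set produces byte-identical output is flagged as a conflict. A standalone sketch of that signature computation (the metric and labels are made up):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/labels"
)

func main() {
    // All buckets of req_duration_bucket{job="api"} share this signature,
    // because the le label is excluded from the byte encoding.
    m := labels.FromStrings("__name__", "req_duration_bucket", "job", "api", "le", "0.5")
    var buf []byte
    buf = m.BytesWithoutLabels(buf, labels.BucketLabel)
    fmt.Printf("signature bytes (le excluded): %x\n", buf)
}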
// rangeEval evaluates the given expressions, and then for each step calls
// the given funcCall with the values computed for each expression at that
// step. The return value is the combination into time series of all the

@@ -1582,6 +1639,11 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
    if err := contextDone(ctx, "expression evaluation"); err != nil {
        ev.error(err)
    }
+    if ev.endTimestamp < ev.startTimestamp {
+        return Matrix{}, nil
+    }
    numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1

    // Create a new span to help investigate inner evaluation performances.
@@ -3591,15 +3653,20 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
}

// PreprocessExpr wraps all possible step invariant parts of the given expression with
-// StepInvariantExpr. It also resolves the preprocessors.
-func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr {
+// StepInvariantExpr. It also resolves the preprocessors and evaluates duration expressions
+// into their numeric values.
+func PreprocessExpr(expr parser.Expr, start, end time.Time) (parser.Expr, error) {
    detectHistogramStatsDecoding(expr)

+    if err := parser.Walk(&durationVisitor{}, expr, nil); err != nil {
+        return nil, err
+    }
+
    isStepInvariant := preprocessExprHelper(expr, start, end)
    if isStepInvariant {
-        return newStepInvariantExpr(expr)
+        return newStepInvariantExpr(expr), nil
    }
-    return expr
+    return expr, nil
}

// preprocessExprHelper wraps the child nodes of the expression


@@ -1900,15 +1900,6 @@ func TestSubquerySelector(t *testing.T) {
            },
            Start: time.Unix(35, 0),
        },
-        {
-            Query: "metric[0:10s]",
-            Result: promql.Result{
-                nil,
-                promql.Matrix{},
-                nil,
-            },
-            Start: time.Unix(10, 0),
-        },
    },
},
{

@@ -3096,7 +3087,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
        t.Run(test.input, func(t *testing.T) {
            expr, err := parser.ParseExpr(test.input)
            require.NoError(t, err)
-            expr = promql.PreprocessExpr(expr, startTime, endTime)
+            expr, err = promql.PreprocessExpr(expr, startTime, endTime)
+            require.NoError(t, err)
            if test.outputTest {
                require.Equal(t, test.input, expr.String(), "error on input '%s'", test.input)
            }

@@ -3268,11 +3260,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
            },
        },
    },
-    "matches series but range is 0": {
-        expr:     "some_metric[0]",
-        ts:       baseT.Add(2 * time.Minute),
-        expected: promql.Matrix{},
-    },
}

for name, testCase := range testCases {


@@ -20,7 +20,6 @@ import (
    "math"
    "slices"
    "sort"
-    "strconv"
    "strings"
    "time"

@@ -932,8 +931,7 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva
    return append(enh.Out, Sample{F: quantile(q, values)}), annos
}

-// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
    samples := vals[0].(Matrix)[0]
    var annos annotations.Annotations
    if len(samples.Floats) == 0 {

@@ -953,33 +951,22 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN
            mean, cMean = kahanSumInc(delta/count, mean, cMean)
            aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
        }
-        return math.Sqrt((aux + cAux) / count)
+        variance := (aux + cAux) / count
+        if varianceToResult == nil {
+            return variance
+        }
+        return varianceToResult(variance)
    }), annos
}

+// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+    return varianceOverTime(vals, args, enh, math.Sqrt)
+}
+
// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-    samples := vals[0].(Matrix)[0]
-    var annos annotations.Annotations
-    if len(samples.Floats) == 0 {
-        return enh.Out, nil
-    }
-    if len(samples.Histograms) > 0 {
-        metricName := samples.Metric.Get(labels.MetricName)
-        annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
-    }
-    return aggrOverTime(vals, enh, func(s Series) float64 {
-        var count float64
-        var mean, cMean float64
-        var aux, cAux float64
-        for _, f := range s.Floats {
-            count++
-            delta := f.F - (mean + cMean)
-            mean, cMean = kahanSumInc(delta/count, mean, cMean)
-            aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
-        }
-        return (aux + cAux) / count
-    }), annos
+    return varianceOverTime(vals, args, enh, nil)
}
// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) ===

@@ -1347,11 +1334,9 @@ func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHe
    return enh.Out, nil
}

-// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-    inVec := vals[0].(Vector)
-    for _, sample := range inVec {
+func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
+    vec := vals[0].(Vector)
+    for _, sample := range vec {
        // Skip non-histogram samples.
        if sample.H == nil {
            continue

@@ -1365,9 +1350,15 @@
            continue
        }
        var val float64
-        if bucket.Lower <= 0 && 0 <= bucket.Upper {
+        switch {
+        case sample.H.UsesCustomBuckets():
+            // Use arithmetic mean in case of custom buckets.
+            val = (bucket.Upper + bucket.Lower) / 2.0
+        case bucket.Lower <= 0 && bucket.Upper >= 0:
+            // Use zero (effectively the arithmetic mean) in the zero bucket of a standard exponential histogram.
            val = 0
-        } else {
+        default:
+            // Use geometric mean in case of standard exponential buckets.
            val = math.Sqrt(bucket.Upper * bucket.Lower)
            if bucket.Upper < 0 {
                val = -val

@@ -1381,48 +1372,8 @@
        if !enh.enableDelayedNameRemoval {
            sample.Metric = sample.Metric.DropMetricName()
        }
-        enh.Out = append(enh.Out, Sample{
-            Metric:   sample.Metric,
-            F:        math.Sqrt(variance),
-            DropName: true,
-        })
-    }
-    return enh.Out, nil
-}
-
-// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-    inVec := vals[0].(Vector)
-    for _, sample := range inVec {
-        // Skip non-histogram samples.
-        if sample.H == nil {
-            continue
-        }
-        mean := sample.H.Sum / sample.H.Count
-        var variance, cVariance float64
-        it := sample.H.AllBucketIterator()
-        for it.Next() {
-            bucket := it.At()
-            if bucket.Count == 0 {
-                continue
-            }
-            var val float64
-            if bucket.Lower <= 0 && 0 <= bucket.Upper {
-                val = 0
-            } else {
-                val = math.Sqrt(bucket.Upper * bucket.Lower)
-                if bucket.Upper < 0 {
-                    val = -val
-                }
-            }
-            delta := val - mean
-            variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
-        }
-        variance += cVariance
-        variance /= sample.H.Count
-        if !enh.enableDelayedNameRemoval {
-            sample.Metric = sample.Metric.DropMetricName()
+        if varianceToResult != nil {
+            variance = varianceToResult(variance)
        }
        enh.Out = append(enh.Out, Sample{
            Metric: sample.Metric,

@@ -1433,17 +1384,26 @@
    return enh.Out, nil
}

+// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+    return histogramVariance(vals, enh, math.Sqrt)
+}
+
+// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+    return histogramVariance(vals, enh, nil)
+}
// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramFraction(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    lower := vals[0].(Vector)[0].F
    upper := vals[1].(Vector)[0].F
    inVec := vals[2].(Vector)

-    for _, sample := range inVec {
-        // Skip non-histogram samples.
-        if sample.H == nil {
-            continue
-        }
+    annos := enh.resetHistograms(inVec, args[2])
+
+    // Deal with the native histograms.
+    for _, sample := range enh.nativeHistogramSamples {
        if !enh.enableDelayedNameRemoval {
            sample.Metric = sample.Metric.DropMetricName()
        }

@@ -1453,7 +1413,24 @@
            DropName: true,
        })
    }
-    return enh.Out, nil
+
+    // Deal with classic histograms that have already been filtered for conflicting native histograms.
+    for _, mb := range enh.signatureToMetricWithBuckets {
+        if len(mb.buckets) == 0 {
+            continue
+        }
+        if !enh.enableDelayedNameRemoval {
+            mb.metric = mb.metric.DropMetricName()
+        }
+        enh.Out = append(enh.Out, Sample{
+            Metric:   mb.metric,
+            F:        BucketFraction(lower, upper, mb.buckets),
+            DropName: true,
+        })
+    }
+    return enh.Out, annos
}
// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===

@@ -1465,58 +1442,10 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
    if math.IsNaN(q) || q < 0 || q > 1 {
        annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
    }
+    annos.Merge(enh.resetHistograms(inVec, args[1]))

-    if enh.signatureToMetricWithBuckets == nil {
-        enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}
-    } else {
-        for _, v := range enh.signatureToMetricWithBuckets {
-            v.buckets = v.buckets[:0]
-        }
-    }
-
-    var histogramSamples []Sample
-
-    for _, sample := range inVec {
-        // We are only looking for classic buckets here. Remember
-        // the histograms for later treatment.
-        if sample.H != nil {
-            histogramSamples = append(histogramSamples, sample)
-            continue
-        }
-
-        upperBound, err := strconv.ParseFloat(
-            sample.Metric.Get(model.BucketLabel), 64,
-        )
-        if err != nil {
-            annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange()))
-            continue
-        }
-        enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
-        mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]
-        if !ok {
-            sample.Metric = labels.NewBuilder(sample.Metric).
-                Del(excludedLabels...).
-                Labels()
-            mb = &metricWithBuckets{sample.Metric, nil}
-            enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
-        }
-        mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F})
-    }
-
-    // Now deal with the native histograms.
-    for _, sample := range histogramSamples {
-        // We have to reconstruct the exact same signature as above for
-        // a classic histogram, just ignoring any le label.
-        enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
-        if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {
-            // At this data point, we have classic histogram
-            // buckets and a native histogram with the same name and
-            // labels. Do not evaluate anything.
-            annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange()))
-            delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
-            continue
-        }
-
+    // Deal with the native histograms.
+    for _, sample := range enh.nativeHistogramSamples {
        if !enh.enableDelayedNameRemoval {
            sample.Metric = sample.Metric.DropMetricName()
        }

@@ -1527,7 +1456,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
        })
    }

-    // Now do classic histograms that have already been filtered for conflicting native histograms.
+    // Deal with classic histograms that have already been filtered for conflicting native histograms.
    for _, mb := range enh.signatureToMetricWithBuckets {
        if len(mb.buckets) > 0 {
            res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets)


@@ -110,6 +110,15 @@ type BinaryExpr struct {
    ReturnBool bool
}

+// DurationExpr represents a binary expression between two duration expressions.
+type DurationExpr struct {
+    Op       ItemType // The operation of the expression.
+    LHS, RHS Expr     // The operands on the respective sides of the operator.
+    Wrapped  bool     // Set when the duration is wrapped in parentheses.
+
+    StartPos posrange.Pos // For unary operations, the position of the operator.
+}
+
// Call represents a function call.
type Call struct {
    Func *Function // The function that was called.

@@ -124,7 +133,7 @@ type MatrixSelector struct {
    // if the parser hasn't returned an error.
    VectorSelector Expr
    Range          time.Duration
+    RangeExpr      *DurationExpr

    EndPos posrange.Pos
}

@@ -132,16 +141,19 @@ type MatrixSelector struct {
type SubqueryExpr struct {
    Expr      Expr
    Range     time.Duration
+    RangeExpr *DurationExpr
    // OriginalOffset is the actual offset that was set in the query.
-    // This never changes.
    OriginalOffset time.Duration
+    // OriginalOffsetExpr is the actual offset expression that was set in the query.
+    OriginalOffsetExpr *DurationExpr
    // Offset is the offset used during the query execution
-    // which is calculated using the original offset, at modifier time,
+    // which is calculated using the original offset, offset expression, at modifier time,
    // eval time, and subquery offsets in the AST tree.
    Offset     time.Duration
    Timestamp  *int64
    StartOrEnd ItemType // Set when @ is used with start() or end()
    Step       time.Duration
+    StepExpr   *DurationExpr

    EndPos posrange.Pos
}

@@ -150,6 +162,7 @@ type SubqueryExpr struct {
type NumberLiteral struct {
    Val      float64
+    Duration bool // Used to format the number as a duration.

    PosRange posrange.PositionRange
}

@@ -191,9 +204,10 @@ func (e *StepInvariantExpr) PositionRange() posrange.PositionRange {
// VectorSelector represents a Vector selection.
type VectorSelector struct {
    Name string
-    // OriginalOffset is the actual offset that was set in the query.
-    // This never changes.
+    // OriginalOffset is the actual offset calculated from OriginalOffsetExpr.
    OriginalOffset time.Duration
+    // OriginalOffsetExpr is the actual offset that was set in the query.
+    OriginalOffsetExpr *DurationExpr
    // Offset is the offset used during the query execution
    // which is calculated using the original offset, at modifier time,
    // eval time, and subquery offsets in the AST tree.

@@ -244,6 +258,7 @@ func (e *BinaryExpr) Type() ValueType {
    return ValueTypeVector
}
func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() }
+func (e *DurationExpr) Type() ValueType      { return ValueTypeScalar }

func (*AggregateExpr) PromQLExpr() {}
func (*BinaryExpr) PromQLExpr()    {}

@@ -256,6 +271,7 @@ func (*StringLiteral) PromQLExpr() {}
func (*UnaryExpr) PromQLExpr()         {}
func (*VectorSelector) PromQLExpr()    {}
func (*StepInvariantExpr) PromQLExpr() {}
+func (*DurationExpr) PromQLExpr()      {}

// VectorMatchCardinality describes the cardinality relationship
// of two Vectors in a binary operation.

@@ -438,6 +454,16 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange {
    return mergeRanges(e.LHS, e.RHS)
}

+func (e *DurationExpr) PositionRange() posrange.PositionRange {
+    if e.LHS == nil {
+        return posrange.PositionRange{
+            Start: e.StartPos,
+            End:   e.RHS.PositionRange().End,
+        }
+    }
+    return mergeRanges(e.LHS, e.RHS)
+}
+
func (e *Call) PositionRange() posrange.PositionRange {
    return e.PosRange
}
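To make the new fields concrete, this is roughly the AST a range such as foo[1h+30m] yields once the experimental flag is on, hand-built against the definitions above (a sketch: positions and label matchers are omitted, and Range is left zero because the engine's durationVisitor fills it in later):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/promql/parser"
)

func main() {
    sel := &parser.MatrixSelector{
        VectorSelector: &parser.VectorSelector{Name: "foo"},
        // Range stays zero here; PreprocessExpr resolves it from RangeExpr.
        RangeExpr: &parser.DurationExpr{
            Op:  parser.ADD,
            LHS: &parser.NumberLiteral{Val: 3600, Duration: true}, // 1h in seconds
            RHS: &parser.NumberLiteral{Val: 1800, Duration: true}, // 30m in seconds
        },
    }
    fmt.Println(sel.RangeExpr) // expected to print: 1h + 30m
}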


@@ -186,7 +186,7 @@ START_METRIC_SELECTOR
%type <int> int
%type <uint> uint
%type <float> number series_value signed_number signed_or_unsigned_number
-%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
+%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr

%start start

@@ -235,6 +235,7 @@ expr :
    | unary_expr
    | vector_selector
    | step_invariant_expr
+   | duration_expr
    ;

/*

@@ -433,23 +434,35 @@ paren_expr : LEFT_PAREN expr RIGHT_PAREN
 * Offset modifiers.
 */

+positive_duration_expr : duration_expr
+        {
+            if numLit, ok := $1.(*NumberLiteral); ok {
+                if numLit.Val <= 0 {
+                    yylex.(*parser).addParseErrf(numLit.PositionRange(), "duration must be greater than 0")
+                    $$ = &NumberLiteral{Val: 0} // Return 0 on error.
+                    break
+                }
+                $$ = $1
+                break
+            }
+            $$ = $1
+        }
+        ;

-offset_expr: expr OFFSET number_duration_literal
+offset_expr: expr OFFSET duration_expr
        {
-            numLit, _ := $3.(*NumberLiteral)
-            dur := time.Duration(numLit.Val * 1000) * time.Millisecond
-            yylex.(*parser).addOffset($1, dur)
-            $$ = $1
-        }
-        | expr OFFSET SUB number_duration_literal
-        {
-            numLit, _ := $4.(*NumberLiteral)
-            dur := time.Duration(numLit.Val * 1000) * time.Millisecond
-            yylex.(*parser).addOffset($1, -dur)
+            if numLit, ok := $3.(*NumberLiteral); ok {
+                yylex.(*parser).addOffset($1, time.Duration(numLit.Val*1000)*time.Millisecond)
+                $$ = $1
+                break
+            }
+            yylex.(*parser).addOffsetExpr($1, $3.(*DurationExpr))
            $$ = $1
        }
        | expr OFFSET error
        { yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
        ;

/*
 * @ modifiers.
 */

@@ -474,7 +487,7 @@ at_modifier_preprocessors: START | END;
 * Subquery and range selectors.
 */

-matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
+matrix_selector : expr LEFT_BRACKET positive_duration_expr RIGHT_BRACKET
        {
            var errMsg string
            vs, ok := $1.(*VectorSelector)

@@ -491,41 +504,60 @@ matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
                yylex.(*parser).addParseErrf(errRange, "%s", errMsg)
            }

-            numLit, _ := $3.(*NumberLiteral)
+            var rangeNl time.Duration
+            if numLit, ok := $3.(*NumberLiteral); ok {
+                rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
+            }
+            rangeExpr, _ := $3.(*DurationExpr)
            $$ = &MatrixSelector{
                VectorSelector: $1.(Expr),
-                Range: time.Duration(numLit.Val * 1000) * time.Millisecond,
+                Range: rangeNl,
+                RangeExpr: rangeExpr,
                EndPos: yylex.(*parser).lastClosing,
            }
        }
        ;

-subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET
+subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_duration_expr RIGHT_BRACKET
        {
-            numLitRange, _ := $3.(*NumberLiteral)
-            numLitStep, _ := $5.(*NumberLiteral)
+            var rangeNl time.Duration
+            var stepNl time.Duration
+            if numLit, ok := $3.(*NumberLiteral); ok {
+                rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
+            }
+            rangeExpr, _ := $3.(*DurationExpr)
+            if numLit, ok := $5.(*NumberLiteral); ok {
+                stepNl = time.Duration(numLit.Val*1000)*time.Millisecond
+            }
+            stepExpr, _ := $5.(*DurationExpr)
            $$ = &SubqueryExpr{
                Expr: $1.(Expr),
-                Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
-                Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond,
+                Range: rangeNl,
+                RangeExpr: rangeExpr,
+                Step: stepNl,
+                StepExpr: stepExpr,
                EndPos: $6.Pos + 1,
            }
        }
-        | expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET
+        | expr LEFT_BRACKET positive_duration_expr COLON RIGHT_BRACKET
        {
-            numLitRange, _ := $3.(*NumberLiteral)
+            var rangeNl time.Duration
+            if numLit, ok := $3.(*NumberLiteral); ok {
+                rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
+            }
+            rangeExpr, _ := $3.(*DurationExpr)
            $$ = &SubqueryExpr{
                Expr: $1.(Expr),
-                Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
-                Step: 0,
+                Range: rangeNl,
+                RangeExpr: rangeExpr,
                EndPos: $5.Pos + 1,
            }
        }
-        | expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error
+        | expr LEFT_BRACKET positive_duration_expr COLON positive_duration_expr error
        { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
-        | expr LEFT_BRACKET number_duration_literal COLON error
+        | expr LEFT_BRACKET positive_duration_expr COLON error
        { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
-        | expr LEFT_BRACKET number_duration_literal error
+        | expr LEFT_BRACKET positive_duration_expr error
        { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
        | expr LEFT_BRACKET error
        { yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }

@@ -930,6 +962,7 @@ number_duration_literal : NUMBER
            $$ = &NumberLiteral{
                Val:      dur.Seconds(),
                PosRange: $1.PositionRange(),
+               Duration: true,
            }
        }
        ;

@@ -997,4 +1030,105 @@ maybe_grouping_labels: /* empty */ { $$ = nil }
    | grouping_labels
    ;
/*
* Duration expressions.
*/
duration_expr : number_duration_literal
{
nl := $1.(*NumberLiteral)
if nl.Val > 1<<63/1e9 || nl.Val < -(1<<63)/1e9 {
yylex.(*parser).addParseErrf(nl.PosRange, "duration out of range")
$$ = &NumberLiteral{Val: 0}
break
}
$$ = nl
}
| unary_op duration_expr %prec MUL
{
switch expr := $2.(type) {
case *NumberLiteral:
if $1.Typ == SUB {
expr.Val *= -1
}
if expr.Val > 1<<63/1e9 || expr.Val < -(1<<63)/1e9 {
yylex.(*parser).addParseErrf($1.PositionRange(), "duration out of range")
$$ = &NumberLiteral{Val: 0}
break
}
expr.PosRange.Start = $1.Pos
$$ = expr
break
case *DurationExpr:
if $1.Typ == SUB {
$$ = &DurationExpr{
Op: SUB,
RHS: expr,
StartPos: $1.Pos,
}
break
}
$$ = expr
break
default:
yylex.(*parser).addParseErrf($1.PositionRange(), "expected number literal or duration expression")
$$ = &NumberLiteral{Val: 0}
break
}
}
| duration_expr ADD duration_expr
{
yylex.(*parser).experimentalDurationExpr($1.(Expr))
$$ = &DurationExpr{Op: ADD, LHS: $1.(Expr), RHS: $3.(Expr)}
}
| duration_expr SUB duration_expr
{
yylex.(*parser).experimentalDurationExpr($1.(Expr))
$$ = &DurationExpr{Op: SUB, LHS: $1.(Expr), RHS: $3.(Expr)}
}
| duration_expr MUL duration_expr
{
yylex.(*parser).experimentalDurationExpr($1.(Expr))
$$ = &DurationExpr{Op: MUL, LHS: $1.(Expr), RHS: $3.(Expr)}
}
| duration_expr DIV duration_expr
{
yylex.(*parser).experimentalDurationExpr($1.(Expr))
if nl, ok := $3.(*NumberLiteral); ok && nl.Val == 0 {
yylex.(*parser).addParseErrf($2.PositionRange(), "division by zero")
$$ = &NumberLiteral{Val: 0}
break
}
$$ = &DurationExpr{Op: DIV, LHS: $1.(Expr), RHS: $3.(Expr)}
}
| duration_expr MOD duration_expr
{
yylex.(*parser).experimentalDurationExpr($1.(Expr))
if nl, ok := $3.(*NumberLiteral); ok && nl.Val == 0 {
yylex.(*parser).addParseErrf($2.PositionRange(), "modulo by zero")
$$ = &NumberLiteral{Val: 0}
break
}
$$ = &DurationExpr{Op: MOD, LHS: $1.(Expr), RHS: $3.(Expr)}
}
| duration_expr POW duration_expr
{
yylex.(*parser).experimentalDurationExpr($1.(Expr))
$$ = &DurationExpr{Op: POW, LHS: $1.(Expr), RHS: $3.(Expr)}
}
| paren_duration_expr
;
paren_duration_expr : LEFT_PAREN duration_expr RIGHT_PAREN
{
yylex.(*parser).experimentalDurationExpr($2.(Expr))
if durationExpr, ok := $2.(*DurationExpr); ok {
durationExpr.Wrapped = true
$$ = durationExpr
break
}
$$ = $2
}
;
%%
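Two consequences of these rules are worth spelling out. Inside brackets the ordinary PromQL arithmetic precedences apply (unary minus binds like multiplication via %prec MUL), so foo[5m-1m*2] parses as 5m - (1m * 2), i.e. three minutes once resolved. And the 1<<63/1e9 guard caps any literal at 2^63 nanoseconds, roughly 9.2e9 seconds or about 292 years, which is what the foo offset 9.5e10 test case further down trips over. A quick parse-and-print sketch under those assumptions (metric name illustrative):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/promql/parser"
)

func main() {
    parser.ExperimentalDurationExpr = true

    // Multiplication binds tighter than subtraction, so this range is
    // 5m - (1m * 2) once the durationVisitor resolves it.
    expr, err := parser.ParseExpr(`foo[5m-1m*2]`)
    if err != nil {
        panic(err)
    }
    fmt.Println(expr) // should print: foo[5m - 1m * 2]
}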

File diff suppressed because it is too large.


@@ -277,6 +277,7 @@ type Lexer struct {
    braceOpen   bool // Whether a { is opened.
    bracketOpen bool // Whether a [ is opened.
    gotColon    bool // Whether we got a ':' after [ was opened.
+   gotDuration bool // Whether we got a duration after [ was opened.
    stringOpen  rune // Quote rune of the string currently being read.

    // series description variables for internal PromQL testing framework as well as in promtool rules unit tests.

@@ -491,7 +492,7 @@ func lexStatements(l *Lexer) stateFn {
            skipSpaces(l)
        }
        l.bracketOpen = true
-       return lexNumberOrDuration
+       return lexDurationExpr
    case r == ']':
        if !l.bracketOpen {
            return l.errorf("unexpected right bracket %q", r)

@@ -549,6 +550,8 @@ func lexHistogram(l *Lexer) stateFn {
        return lexNumber
    case r == '[':
        l.bracketOpen = true
+       l.gotColon = false
+       l.gotDuration = false
        l.emit(LEFT_BRACKET)
        return lexBuckets
    case r == '}' && l.peek() == '}':

@@ -1077,3 +1080,64 @@ func isDigit(r rune) bool {
func isAlpha(r rune) bool {
    return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}
// lexDurationExpr scans arithmetic expressions within brackets for duration expressions.
func lexDurationExpr(l *Lexer) stateFn {
switch r := l.next(); {
case r == eof:
return l.errorf("unexpected end of input in duration expression")
case r == ']':
l.emit(RIGHT_BRACKET)
l.bracketOpen = false
l.gotColon = false
return lexStatements
case r == ':':
l.emit(COLON)
if !l.gotDuration {
return l.errorf("unexpected colon before duration in duration expression")
}
if l.gotColon {
return l.errorf("unexpected repeated colon in duration expression")
}
l.gotColon = true
return lexDurationExpr
case r == '(':
l.emit(LEFT_PAREN)
l.parenDepth++
return lexDurationExpr
case r == ')':
l.emit(RIGHT_PAREN)
l.parenDepth--
if l.parenDepth < 0 {
return l.errorf("unexpected right parenthesis %q", r)
}
return lexDurationExpr
case isSpace(r):
skipSpaces(l)
return lexDurationExpr
case r == '+':
l.emit(ADD)
return lexDurationExpr
case r == '-':
l.emit(SUB)
return lexDurationExpr
case r == '*':
l.emit(MUL)
return lexDurationExpr
case r == '/':
l.emit(DIV)
return lexDurationExpr
case r == '%':
l.emit(MOD)
return lexDurationExpr
case r == '^':
l.emit(POW)
return lexDurationExpr
case isDigit(r) || (r == '.' && isDigit(l.peek())):
l.backup()
l.gotDuration = true
return lexNumberOrDuration
default:
return l.errorf("unexpected character in duration expression: %q", r)
}
}
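For orientation, the state function above would emit roughly this token sequence for a bracketed duration expression (an illustration of the state machine, not captured tool output):

    foo[5m+30s:10s]
      -> IDENTIFIER("foo"), LEFT_BRACKET, DURATION(5m), ADD, DURATION(30s),
         COLON, DURATION(10s), RIGHT_BRACKET

The gotDuration/gotColon flags reject a colon before any duration and a second colon, which is what the [1s:1s:1s] lexer test below exercises.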


@@ -951,6 +951,10 @@ var tests = []struct {
        {
            input: `test:name{on!~"bar"}[:4s]`,
            fail:  true,
        },
+       {
+           input: `test:name{on!~"bar"}[1s:1s:1s]`,
+           fail:  true,
+       },
    },
},
}


@@ -39,6 +39,9 @@ var parserPool = sync.Pool{
    },
}

+// ExperimentalDurationExpr is a flag to enable experimental duration expression parsing.
+var ExperimentalDurationExpr bool
+
type Parser interface {
    ParseExpr() (Expr, error)
    Close()

@@ -881,9 +884,6 @@ func parseDuration(ds string) (time.Duration, error) {
    if err != nil {
        return 0, err
    }
-   if dur == 0 {
-       return 0, errors.New("duration must be greater than 0")
-   }
    return time.Duration(dur), nil
}

@@ -939,11 +939,13 @@ func (p *parser) newMetricNameMatcher(value Item) *labels.Matcher {
// addOffset is used to set the offset in the generated parser.
func (p *parser) addOffset(e Node, offset time.Duration) {
    var orgoffsetp *time.Duration
+   var orgoffsetexprp *DurationExpr
    var endPosp *posrange.Pos

    switch s := e.(type) {
    case *VectorSelector:
        orgoffsetp = &s.OriginalOffset
+       orgoffsetexprp = s.OriginalOffsetExpr
        endPosp = &s.PosRange.End
    case *MatrixSelector:
        vs, ok := s.VectorSelector.(*VectorSelector)

@@ -952,9 +954,11 @@ func (p *parser) addOffset(e Node, offset time.Duration) {
            return
        }
        orgoffsetp = &vs.OriginalOffset
+       orgoffsetexprp = vs.OriginalOffsetExpr
        endPosp = &s.EndPos
    case *SubqueryExpr:
        orgoffsetp = &s.OriginalOffset
+       orgoffsetexprp = s.OriginalOffsetExpr
        endPosp = &s.EndPos
    default:
        p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery")

@@ -963,7 +967,7 @@ func (p *parser) addOffset(e Node, offset time.Duration) {
    // it is already ensured by parseDuration func that there never will be a zero offset modifier
    switch {
-   case *orgoffsetp != 0:
+   case *orgoffsetp != 0 || orgoffsetexprp != nil:
        p.addParseErrf(e.PositionRange(), "offset may not be set multiple times")
    case orgoffsetp != nil:
        *orgoffsetp = offset

@@ -972,6 +976,45 @@ func (p *parser) addOffset(e Node, offset time.Duration) {
    *endPosp = p.lastClosing
}
// addOffsetExpr is used to set the offset expression in the generated parser.
func (p *parser) addOffsetExpr(e Node, expr *DurationExpr) {
var orgoffsetp *time.Duration
var orgoffsetexprp **DurationExpr
var endPosp *posrange.Pos
switch s := e.(type) {
case *VectorSelector:
orgoffsetp = &s.OriginalOffset
orgoffsetexprp = &s.OriginalOffsetExpr
endPosp = &s.PosRange.End
case *MatrixSelector:
vs, ok := s.VectorSelector.(*VectorSelector)
if !ok {
p.addParseErrf(e.PositionRange(), "ranges only allowed for vector selectors")
return
}
orgoffsetp = &vs.OriginalOffset
orgoffsetexprp = &vs.OriginalOffsetExpr
endPosp = &s.EndPos
case *SubqueryExpr:
orgoffsetp = &s.OriginalOffset
orgoffsetexprp = &s.OriginalOffsetExpr
endPosp = &s.EndPos
default:
p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery")
return
}
switch {
case *orgoffsetp != 0 || *orgoffsetexprp != nil:
p.addParseErrf(e.PositionRange(), "offset may not be set multiple times")
case orgoffsetexprp != nil:
*orgoffsetexprp = expr
}
*endPosp = p.lastClosing
}
// setTimestamp is used to set the timestamp from the @ modifier in the generated parser.
func (p *parser) setTimestamp(e Node, ts float64) {
    if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) ||

@@ -1045,6 +1088,12 @@ func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *posrange.Pos, b
    return timestampp, preprocp, endPosp, true
}
func (p *parser) experimentalDurationExpr(e Expr) {
if !ExperimentalDurationExpr {
p.addParseErrf(e.PositionRange(), "experimental duration expression is not enabled")
}
}
func MustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher {
    m, err := labels.NewMatcher(mt, name, val)
    if err != nil {


@@ -2337,12 +2337,12 @@ var testExpr = []struct {
    {
        input:  `foo[]`,
        fail:   true,
-       errMsg: "bad number or duration syntax: \"\"",
+       errMsg: "unexpected \"]\" in subquery selector, expected number or duration",
    },
    {
        input:  `foo[-1]`,
        fail:   true,
-       errMsg: "bad number or duration syntax: \"\"",
+       errMsg: "duration must be greater than 0",
    },
    {
        input: `some_metric[5m] OFFSET 1mm`,

@@ -3091,7 +3091,7 @@ var testExpr = []struct {
    {
        input:  `foo{bar="baz"}[`,
        fail:   true,
-       errMsg: `1:16: parse error: bad number or duration syntax: ""`,
+       errMsg: `unexpected end of input in duration expression`,
    },
    {
        input: `foo{bar="baz"}[10m:6s]`,

@@ -3946,6 +3946,304 @@ var testExpr = []struct {
},
},
},
{
input: `foo[11s+10s-5*2^2]`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
RangeExpr: &DurationExpr{
Op: SUB,
LHS: &DurationExpr{
Op: ADD,
LHS: &NumberLiteral{
Val: 11,
PosRange: posrange.PositionRange{
Start: 4,
End: 7,
},
Duration: true,
},
RHS: &NumberLiteral{
Val: 10,
PosRange: posrange.PositionRange{
Start: 8,
End: 11,
},
Duration: true,
},
},
RHS: &DurationExpr{
Op: MUL,
LHS: &NumberLiteral{Val: 5, PosRange: posrange.PositionRange{Start: 12, End: 13}},
RHS: &DurationExpr{
Op: POW,
LHS: &NumberLiteral{Val: 2, PosRange: posrange.PositionRange{Start: 14, End: 15}},
RHS: &NumberLiteral{Val: 2, PosRange: posrange.PositionRange{Start: 16, End: 17}},
},
},
},
EndPos: 18,
},
},
{
input: `foo[-(10s-5s)+20s]`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
RangeExpr: &DurationExpr{
Op: ADD,
LHS: &DurationExpr{
Op: SUB,
StartPos: 4,
RHS: &DurationExpr{
Op: SUB,
LHS: &NumberLiteral{
Val: 10,
PosRange: posrange.PositionRange{
Start: 6,
End: 9,
},
Duration: true,
},
RHS: &NumberLiteral{
Val: 5,
PosRange: posrange.PositionRange{
Start: 10,
End: 12,
},
Duration: true,
},
Wrapped: true,
},
},
RHS: &NumberLiteral{
Val: 20,
PosRange: posrange.PositionRange{
Start: 14,
End: 17,
},
Duration: true,
},
},
EndPos: 18,
},
},
{
input: `foo[-10s+15s]`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
RangeExpr: &DurationExpr{
Op: ADD,
LHS: &NumberLiteral{
Val: -10,
PosRange: posrange.PositionRange{
Start: 4,
End: 8,
},
Duration: true,
},
RHS: &NumberLiteral{
Val: 15,
PosRange: posrange.PositionRange{
Start: 9,
End: 12,
},
Duration: true,
},
},
EndPos: 13,
},
},
{
input: `foo[4s+4s:1s*2] offset (5s-8)`,
expected: &SubqueryExpr{
Expr: &VectorSelector{
Name: "foo",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
RangeExpr: &DurationExpr{
Op: ADD,
LHS: &NumberLiteral{
Val: 4,
PosRange: posrange.PositionRange{
Start: 4,
End: 6,
},
Duration: true,
},
RHS: &NumberLiteral{
Val: 4,
PosRange: posrange.PositionRange{
Start: 7,
End: 9,
},
Duration: true,
},
},
StepExpr: &DurationExpr{
Op: MUL,
LHS: &NumberLiteral{
Val: 1,
PosRange: posrange.PositionRange{
Start: 10,
End: 12,
},
Duration: true,
},
RHS: &NumberLiteral{
Val: 2,
PosRange: posrange.PositionRange{
Start: 13,
End: 14,
},
},
},
OriginalOffsetExpr: &DurationExpr{
Op: SUB,
LHS: &NumberLiteral{
Val: 5,
PosRange: posrange.PositionRange{
Start: 24,
End: 26,
},
Duration: true,
},
RHS: &NumberLiteral{
Val: 8,
PosRange: posrange.PositionRange{
Start: 27,
End: 28,
},
},
Wrapped: true,
},
EndPos: 29,
},
},
{
input: `foo offset 5s-8`,
expected: &BinaryExpr{
Op: SUB,
LHS: &VectorSelector{
Name: "foo",
OriginalOffset: 5 * time.Second,
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 13,
},
},
RHS: &NumberLiteral{
Val: 8,
PosRange: posrange.PositionRange{
Start: 14,
End: 15,
},
},
},
},
{
input: `rate(foo[2m+2m])`,
expected: &Call{
Func: MustGetFunction("rate"),
Args: Expressions{
&MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 5,
End: 8,
},
},
RangeExpr: &DurationExpr{
Op: ADD,
LHS: &NumberLiteral{
Val: 120,
PosRange: posrange.PositionRange{
Start: 9,
End: 11,
},
Duration: true,
},
RHS: &NumberLiteral{
Val: 120,
PosRange: posrange.PositionRange{
Start: 12,
End: 14,
},
Duration: true,
},
},
EndPos: 15,
},
},
PosRange: posrange.PositionRange{
Start: 0,
End: 16,
},
},
},
{
input: `foo[5s/0d]`,
fail: true,
errMsg: `division by zero`,
},
{
input: `foo offset (4d/0)`,
fail: true,
errMsg: `division by zero`,
},
{
input: `foo[5s%0d]`,
fail: true,
errMsg: `modulo by zero`,
},
{
input: `foo offset 9.5e10`,
fail: true,
errMsg: `duration out of range`,
},
{
input: `foo[9.5e10]`,
fail: true,
errMsg: `duration out of range`,
},
}

func makeInt64Pointer(val int64) *int64 {
@@ -3965,8 +4263,11 @@ func readable(s string) string {
func TestParseExpressions(t *testing.T) {
    // Enable experimental functions testing.
    EnableExperimentalFunctions = true
+   // Enable experimental duration expression parsing.
+   ExperimentalDurationExpr = true
    t.Cleanup(func() {
        EnableExperimentalFunctions = false
+       ExperimentalDurationExpr = false
    })

    for _, test := range testExpr {

@@ -79,6 +79,22 @@ func (e *BinaryExpr) Pretty(level int) string {
    return fmt.Sprintf("%s\n%s%s%s%s\n%s", e.LHS.Pretty(level+1), indent(level), e.Op, returnBool, matching, e.RHS.Pretty(level+1))
}
func (e *DurationExpr) Pretty(int) string {
var s string
fmt.Println("e.LHS", e.LHS)
fmt.Println("e.RHS", e.RHS)
if e.LHS == nil {
// This is a unary negative duration expression.
s = fmt.Sprintf("%s %s", e.Op, e.RHS.Pretty(0))
} else {
s = fmt.Sprintf("%s %s %s", e.LHS.Pretty(0), e.Op, e.RHS.Pretty(0))
}
if e.Wrapped {
s = fmt.Sprintf("(%s)", s)
}
return s
}
func (e *Call) Pretty(level int) string { func (e *Call) Pretty(level int) string {
s := indent(level) s := indent(level)
if !needsSplit(e) { if !needsSplit(e) {

View File

@ -668,3 +668,41 @@ func TestUnaryPretty(t *testing.T) {
}) })
} }
} }
func TestDurationExprPretty(t *testing.T) {
// Enable experimental duration expression parsing.
ExperimentalDurationExpr = true
t.Cleanup(func() {
ExperimentalDurationExpr = false
})
maxCharactersPerLine = 10
inputs := []struct {
in, out string
}{
{
in: `rate(foo[2*1h])`,
out: `rate(
foo[2 * 1h]
)`,
},
{
in: `rate(foo[-5m+35m])`,
out: `rate(
foo[-5m + 35m]
)`,
},
}
for _, test := range inputs {
t.Run(test.in, func(t *testing.T) {
expr, err := ParseExpr(test.in)
require.NoError(t, err)
require.Equal(t, test.out, Prettify(expr))
})
}
}

View File

@ -146,6 +146,24 @@ func (node *BinaryExpr) getMatchingStr() string {
return matching return matching
} }
func (node *DurationExpr) String() string {
var expr string
if node.LHS == nil {
// This is a unary negative duration expression.
expr = fmt.Sprintf("%s%s", node.Op, node.RHS)
} else {
expr = fmt.Sprintf("%s %s %s", node.LHS, node.Op, node.RHS)
}
if node.Wrapped {
return fmt.Sprintf("(%s)", expr)
}
return expr
}
func (node *DurationExpr) ShortString() string {
return node.Op.String()
}
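As a quick illustration of the two String branches above, here is a hand-built sketch (the parser normally constructs these nodes; DurationExpr, NumberLiteral, ADD and SUB are the exported parser identifiers used throughout this diff):
neg := &DurationExpr{Op: SUB, RHS: &NumberLiteral{Val: 300, Duration: true}}
fmt.Println(neg.String()) // -5m: unary negative, no space between op and operand
sum := &DurationExpr{
	Op:      ADD,
	LHS:     &NumberLiteral{Val: 240, Duration: true},
	RHS:     &NumberLiteral{Val: 240, Duration: true},
	Wrapped: true,
}
fmt.Println(sum.String()) // (4m + 4m): binary form, parenthesized because Wrapped is set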
func (node *Call) String() string { func (node *Call) String() string {
return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args) return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args)
} }
@ -159,6 +177,8 @@ func (node *MatrixSelector) atOffset() (string, string) {
vecSelector := node.VectorSelector.(*VectorSelector) vecSelector := node.VectorSelector.(*VectorSelector)
offset := "" offset := ""
switch { switch {
case vecSelector.OriginalOffsetExpr != nil:
offset = fmt.Sprintf(" offset %s", vecSelector.OriginalOffsetExpr)
case vecSelector.OriginalOffset > time.Duration(0): case vecSelector.OriginalOffset > time.Duration(0):
offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset)) offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset))
case vecSelector.OriginalOffset < time.Duration(0): case vecSelector.OriginalOffset < time.Duration(0):
@ -181,21 +201,30 @@ func (node *MatrixSelector) String() string {
// Copy the Vector selector before changing the offset // Copy the Vector selector before changing the offset
vecSelector := *node.VectorSelector.(*VectorSelector) vecSelector := *node.VectorSelector.(*VectorSelector)
// Do not print the @ and offset twice. // Do not print the @ and offset twice.
offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd offsetVal, offsetExprVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd
vecSelector.OriginalOffset = 0 vecSelector.OriginalOffset = 0
vecSelector.OriginalOffsetExpr = nil
vecSelector.Timestamp = nil vecSelector.Timestamp = nil
vecSelector.StartOrEnd = 0 vecSelector.StartOrEnd = 0
str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), model.Duration(node.Range), at, offset) rangeStr := model.Duration(node.Range).String()
if node.RangeExpr != nil {
rangeStr = node.RangeExpr.String()
}
str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), rangeStr, at, offset)
vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, atVal, preproc vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, offsetExprVal, atVal, preproc
return str return str
} }
func (node *MatrixSelector) ShortString() string { func (node *MatrixSelector) ShortString() string {
at, offset := node.atOffset() at, offset := node.atOffset()
return fmt.Sprintf("[%s]%s%s", model.Duration(node.Range), at, offset) rangeStr := model.Duration(node.Range).String()
if node.RangeExpr != nil {
rangeStr = node.RangeExpr.String()
}
return fmt.Sprintf("[%s]%s%s", rangeStr, at, offset)
} }
func (node *SubqueryExpr) String() string { func (node *SubqueryExpr) String() string {
@ -211,9 +240,13 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
step := "" step := ""
if node.Step != 0 { if node.Step != 0 {
step = model.Duration(node.Step).String() step = model.Duration(node.Step).String()
} else if node.StepExpr != nil {
step = node.StepExpr.String()
} }
offset := "" offset := ""
switch { switch {
case node.OriginalOffsetExpr != nil:
offset = fmt.Sprintf(" offset %s", node.OriginalOffsetExpr)
case node.OriginalOffset > time.Duration(0): case node.OriginalOffset > time.Duration(0):
offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
case node.OriginalOffset < time.Duration(0): case node.OriginalOffset < time.Duration(0):
@ -228,10 +261,20 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
case node.StartOrEnd == END: case node.StartOrEnd == END:
at = " @ end()" at = " @ end()"
} }
return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset) rangeStr := model.Duration(node.Range).String()
if node.RangeExpr != nil {
rangeStr = node.RangeExpr.String()
}
return fmt.Sprintf("[%s:%s]%s%s", rangeStr, step, at, offset)
} }
func (node *NumberLiteral) String() string { func (node *NumberLiteral) String() string {
if node.Duration {
if node.Val < 0 {
return fmt.Sprintf("-%s", model.Duration(-node.Val*1e9).String())
}
return model.Duration(node.Val * 1e9).String()
}
return strconv.FormatFloat(node.Val, 'f', -1, 64) return strconv.FormatFloat(node.Val, 'f', -1, 64)
} }
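Val is stored in seconds here, hence the 1e9 factor to reach model.Duration's nanosecond resolution; a minimal sketch of both sign branches:
n := &NumberLiteral{Val: 90, Duration: true}
fmt.Println(n.String()) // 1m30s
n.Val = -90
fmt.Println(n.String()) // -1m30s (the sign is printed before the formatted duration)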
@ -265,6 +308,8 @@ func (node *VectorSelector) String() string {
} }
offset := "" offset := ""
switch { switch {
case node.OriginalOffsetExpr != nil:
offset = fmt.Sprintf(" offset %s", node.OriginalOffsetExpr)
case node.OriginalOffset > time.Duration(0): case node.OriginalOffset > time.Duration(0):
offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
case node.OriginalOffset < time.Duration(0): case node.OriginalOffset < time.Duration(0):

View File

@ -117,8 +117,12 @@ func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
// RunBuiltinTestsWithStorage runs an acceptance test suite against the provided engine and storage. // RunBuiltinTestsWithStorage runs an acceptance test suite against the provided engine and storage.
func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) { func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) t.Cleanup(func() {
parser.EnableExperimentalFunctions = false
parser.ExperimentalDurationExpr = false
})
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
parser.ExperimentalDurationExpr = true
files, err := fs.Glob(testsFs, "*/*.test") files, err := fs.Glob(testsFs, "*/*.test")
require.NoError(t, err) require.NoError(t, err)
@ -1501,6 +1505,9 @@ type LazyLoaderOpts struct {
// Prometheus v2.33). They can still be disabled here for legacy and // Prometheus v2.33). They can still be disabled here for legacy and
// other uses. // other uses.
EnableAtModifier, EnableNegativeOffset bool EnableAtModifier, EnableNegativeOffset bool
// Currently defaults to false, matches the "promql-delayed-name-removal"
// feature flag.
EnableDelayedNameRemoval bool
} }
// NewLazyLoader returns an initialized empty LazyLoader. // NewLazyLoader returns an initialized empty LazyLoader.
@ -1563,7 +1570,7 @@ func (ll *LazyLoader) clear() error {
NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(ll.SubqueryInterval) }, NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(ll.SubqueryInterval) },
EnableAtModifier: ll.opts.EnableAtModifier, EnableAtModifier: ll.opts.EnableAtModifier,
EnableNegativeOffset: ll.opts.EnableNegativeOffset, EnableNegativeOffset: ll.opts.EnableNegativeOffset,
EnableDelayedNameRemoval: true, EnableDelayedNameRemoval: ll.opts.EnableDelayedNameRemoval,
} }
ll.queryEngine = promql.NewEngine(opts) ll.queryEngine = promql.NewEngine(opts)

View File

@ -0,0 +1,121 @@
# Test for different duration expression formats in range selectors.
# This tests the parser's ability to handle various duration expression formats.
# Set up a basic counter that increases steadily.
load 5m
http_requests{path="/foo"} 1 2 3 0 1 0 0 1 2 0
http_requests{path="/bar"} 1 2 3 4 5 1 2 3 4 5
http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1
# Test basic duration with unit: [30m]
eval instant at 50m changes(http_requests[30m])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test addition in duration: [26m+4m]
eval instant at 50m changes(http_requests[26m+4m])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test addition with 0 in duration: [30m+0s]
eval instant at 50m changes(http_requests[30m+0s])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test raw seconds: [1800]
eval instant at 50m changes(http_requests[1800])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test seconds with multiplication: [60*30]
eval instant at 50m changes(http_requests[60*30])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test minutes with multiplication: [2m*15]
eval instant at 50m changes(http_requests[2m*15])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test complex expression with parentheses: [2m*(10+5)]
eval instant at 50m changes(http_requests[2m*(10+5)])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test mixed units: [29m+60s]
eval instant at 50m changes(http_requests[29m+60s])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test nested parentheses: [24m+((1.5*2m)+2m)]
eval instant at 50m changes(http_requests[24m+((1.5*2m)+2m)])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test start with -: [-5m+35m]
eval instant at 50m changes(http_requests[-5m+35m])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test division: [1h/2]
eval instant at 50m changes(http_requests[1h/2])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test modulo: [1h30m % 1h]
eval instant at 50m changes(http_requests[1h30m % 1h])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test modulo and calculation: [30m1s-30m1s % 1m]
eval instant at 50m changes(http_requests[30m1s-30m1s % 1m])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
# Test combination of operations: [(9m30s+30s)*3]
eval instant at 50m changes(http_requests[(9m30s+30s)*3])
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
clear
load 10s
metric1_total 0+1x1000
# Test duration expressions in subquery ranges and steps.
eval instant at 1000s sum_over_time(metric1_total[29s+1s:5s+5s])
{} 297
# Test complex expressions in subquery ranges.
eval instant at 1000s sum_over_time(metric1_total[29s+1s:((((8 - 2) / 3) * 7s) % 4) + 8000ms])
{} 297
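# The step arithmetic works out as (8-2)/3 = 2, 2 * 7s = 14s, 14s % 4 = 2s,
# and 2s + 8000ms = 10s, i.e. the same [30s:10s] subquery as above.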
# Test complex expressions in offset ranges.
eval instant at 1200s sum_over_time(metric1_total[29s+1s:20*500ms] offset (20*(((((8 - 2) / 3) * 7s) % 4) + 8000ms)))
{} 297
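# The offset evaluates to 20 * 10s = 200s, so 1200s - 200s lands on the same
# 1000s evaluation window as above.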
# Test complex expressions in offset ranges with negative offset.
eval instant at 800s sum_over_time(metric1_total[29s+1s:20*500ms] offset -(20*(((((8 - 2) / 3) * 7s) % 4) + 8000ms)))
{} 297
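# The negative offset shifts evaluation forward: 800s - (-200s) = 1000s,
# which again matches the evaluation above.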
# Test offset precedence with parentheses: offset (100 + 2)
eval instant at 1000s metric1_total offset (100 + 2)
{__name__="metric1_total"} 89
# Test offset precedence without parentheses: offset 100 + 2
eval instant at 1000s metric1_total offset 100 + 2
{} 92
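# With parentheses, the offset is 102s: the newest sample at or before 898s is
# the one at 890s with value 89, and the metric name survives because no binary
# operation is involved. Without parentheses, this parses as
# (metric1_total offset 100) + 2: the sample at 900s is 90, the addition yields
# 92, and the binary operation drops the metric name.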

View File

@ -95,24 +95,52 @@ eval instant at 50m histogram_avg(testhistogram3)
# Test histogram_stddev. This has no classic equivalent. # Test histogram_stddev. This has no classic equivalent.
eval instant at 50m histogram_stddev(testhistogram3) eval instant at 50m histogram_stddev(testhistogram3)
{start="positive"} 2.8189265757336734 {start="positive"} 2.7435461458749795
{start="negative"} 4.182715937754936 {start="negative"} 4.187667907081458
# Test histogram_stdvar. This has no classic equivalent. # Test histogram_stdvar. This has no classic equivalent.
eval instant at 50m histogram_stdvar(testhistogram3) eval instant at 50m histogram_stdvar(testhistogram3)
{start="positive"} 7.946347039377573 {start="positive"} 7.527045454545455
{start="negative"} 17.495112615949154 {start="negative"} 17.5365625
# Test histogram_fraction. # Test histogram_fraction.
#
eval instant at 50m histogram_fraction(0, 4, testhistogram2)
{} 0.6666666666666666
eval instant at 50m histogram_fraction(0, 4, testhistogram2_bucket)
{} 0.6666666666666666
eval instant at 50m histogram_fraction(0, 6, testhistogram2)
{} 1
eval instant at 50m histogram_fraction(0, 6, testhistogram2_bucket)
{} 1
eval instant at 50m histogram_fraction(0, 3.5, testhistogram2)
{} 0.5833333333333334
eval instant at 50m histogram_fraction(0, 3.5, testhistogram2_bucket)
{} 0.5833333333333334
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3) eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
{start="positive"} 0.6363636363636364 {start="positive"} 0.6363636363636364
{start="negative"} 0 {start="negative"} 0
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3_bucket)
{start="positive"} 0.6363636363636364
{start="negative"} 0
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m])) eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
{start="positive"} 0.6363636363636364 {start="positive"} 0.6363636363636364
{start="negative"} 0 {start="negative"} 0
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0
# In the classic histogram, we can access the corresponding bucket (if # In the classic histogram, we can access the corresponding bucket (if
# it exists) and divide by the count to get the same result. # it exists) and divide by the count to get the same result.

View File

@ -337,7 +337,7 @@ load 10m
histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3) eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
{} 42.947236400258 {} 42.94723640026
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
{} 1844.4651144196398 {} 1844.4651144196398

View File

@ -150,3 +150,10 @@ eval instant at 10m increase(native_histogram[10m:3m])
# by the sub-query multiple times. # by the sub-query multiple times.
eval instant at 10m increase(native_histogram[10m:15s]) eval instant at 10m increase(native_histogram[10m:15s])
{} {{count:30.769230769230766 sum:30.769230769230766}} {} {{count:30.769230769230766 sum:30.769230769230766}}
# When range < resolution and the first evaluation time is out of range.
load 5m
foo 3+0x10
eval instant at 12m min_over_time((topk(1, foo))[1m:5m])
#empty
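# With the default 5m step aligned to absolute time, the only candidate
# evaluation points in the (11m, 12m] window would be multiples of 5m; 10m is
# too early and 15m too late, so the subquery yields no samples.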

View File

@ -448,6 +448,84 @@ func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
return (upperRank - lowerRank) / h.Count return (upperRank - lowerRank) / h.Count
} }
// BucketFraction is a version of HistogramFraction for classic histograms.
func BucketFraction(lower, upper float64, buckets Buckets) float64 {
slices.SortFunc(buckets, func(a, b Bucket) int {
// We don't expect the bucket boundary to be a NaN.
if a.UpperBound < b.UpperBound {
return -1
}
if a.UpperBound > b.UpperBound {
return +1
}
return 0
})
if !math.IsInf(buckets[len(buckets)-1].UpperBound, +1) {
return math.NaN()
}
buckets = coalesceBuckets(buckets)
count := buckets[len(buckets)-1].Count
if count == 0 || math.IsNaN(lower) || math.IsNaN(upper) {
return math.NaN()
}
if lower >= upper {
return 0
}
var (
rank, lowerRank, upperRank float64
lowerSet, upperSet bool
)
for i, b := range buckets {
lowerBound := math.Inf(-1)
if i > 0 {
lowerBound = buckets[i-1].UpperBound
}
upperBound := b.UpperBound
interpolateLinearly := func(v float64) float64 {
return rank + (b.Count-rank)*(v-lowerBound)/(upperBound-lowerBound)
}
if !lowerSet && lowerBound >= lower {
// We have hit the lower value at the lower bucket boundary.
lowerRank = rank
lowerSet = true
}
if !upperSet && lowerBound >= upper {
// We have hit the upper value at the lower bucket boundary.
upperRank = rank
upperSet = true
}
if lowerSet && upperSet {
break
}
if !lowerSet && lowerBound < lower && upperBound > lower {
// The lower value is in this bucket.
lowerRank = interpolateLinearly(lower)
lowerSet = true
}
if !upperSet && lowerBound < upper && upperBound > upper {
// The upper value is in this bucket.
upperRank = interpolateLinearly(upper)
upperSet = true
}
if lowerSet && upperSet {
break
}
rank = b.Count
}
if !lowerSet || lowerRank > count {
lowerRank = count
}
if !upperSet || upperRank > count {
upperRank = count
}
return (upperRank - lowerRank) / count
}
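For intuition, a minimal sketch of a call, assuming Buckets is a slice of the Bucket values sorted above, with cumulative counts and the last bucket at +Inf:
buckets := Buckets{
	{UpperBound: 1, Count: 10},
	{UpperBound: 2, Count: 20},
	{UpperBound: math.Inf(+1), Count: 20},
}
// 10 of the 20 observations fall in (1, 2], so the fraction is 0.5.
fmt.Println(BucketFraction(1, 2, buckets)) // 0.5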
// coalesceBuckets merges buckets with the same upper bound. // coalesceBuckets merges buckets with the same upper bound.
// //
// The input buckets must be sorted. // The input buckets must be sorted.

View File

@ -384,7 +384,9 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool)
_ = httpResp.Body.Close() _ = httpResp.Body.Close()
cancel() cancel()
return nil, fmt.Errorf("remote server %s returned http status %s: %s", c.urlString, httpResp.Status, string(body)) errStr := strings.Trim(string(body), "\n")
err := errors.New(errStr)
return nil, fmt.Errorf("remote server %s returned http status %s: %w", c.urlString, httpResp.Status, err)
} }
contentType := httpResp.Header.Get("Content-Type") contentType := httpResp.Header.Get("Content-Type")

View File

@ -225,6 +225,7 @@ func TestReadClient(t *testing.T) {
expectedSamples [][]model.SamplePair expectedSamples [][]model.SamplePair
expectedErrorContains string expectedErrorContains string
sortSeries bool sortSeries bool
unwrap bool
}{ }{
{ {
name: "sorted sampled response", name: "sorted sampled response",
@ -336,6 +337,14 @@ func TestReadClient(t *testing.T) {
timeout: 5 * time.Millisecond, timeout: 5 * time.Millisecond,
expectedErrorContains: "context deadline exceeded: request timed out after 5ms", expectedErrorContains: "context deadline exceeded: request timed out after 5ms",
}, },
{
name: "unwrap error",
httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
http.Error(w, "test error", http.StatusBadRequest)
}),
expectedErrorContains: "test error",
unwrap: true,
},
} }
for _, test := range tests { for _, test := range tests {
@ -366,6 +375,10 @@ func TestReadClient(t *testing.T) {
ss, err := c.Read(context.Background(), query, test.sortSeries) ss, err := c.Read(context.Background(), query, test.sortSeries)
if test.expectedErrorContains != "" { if test.expectedErrorContains != "" {
require.ErrorContains(t, err, test.expectedErrorContains) require.ErrorContains(t, err, test.expectedErrorContains)
if test.unwrap {
err = errors.Unwrap(err)
require.EqualError(t, err, test.expectedErrorContains)
}
return return
} }

View File

@ -224,21 +224,19 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
return labels return labels
} }
// isValidAggregationTemporality checks whether an OTel metric has a valid func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporality, bool, error) {
// aggregation temporality for conversion to a Prometheus metric.
func isValidAggregationTemporality(metric pmetric.Metric) bool {
//exhaustive:enforce //exhaustive:enforce
switch metric.Type() { switch metric.Type() {
case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary:
return true return 0, false, nil
case pmetric.MetricTypeSum: case pmetric.MetricTypeSum:
return metric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityCumulative return metric.Sum().AggregationTemporality(), true, nil
case pmetric.MetricTypeHistogram: case pmetric.MetricTypeHistogram:
return metric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative return metric.Histogram().AggregationTemporality(), true, nil
case pmetric.MetricTypeExponentialHistogram: case pmetric.MetricTypeExponentialHistogram:
return metric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative return metric.ExponentialHistogram().AggregationTemporality(), true, nil
} }
return false return 0, false, fmt.Errorf("could not get aggregation temporality for %s as it has unsupported metric type %s", metric.Name(), metric.Type())
} }
// addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series // addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series

View File

@ -37,6 +37,7 @@ const defaultZeroThreshold = 1e-128
// as native histogram samples. // as native histogram samples.
func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice,
resource pcommon.Resource, settings Settings, promName string, resource pcommon.Resource, settings Settings, promName string,
temporality pmetric.AggregationTemporality,
) (annotations.Annotations, error) { ) (annotations.Annotations, error) {
var annots annotations.Annotations var annots annotations.Annotations
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
@ -46,7 +47,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
pt := dataPoints.At(x) pt := dataPoints.At(x)
histogram, ws, err := exponentialToNativeHistogram(pt) histogram, ws, err := exponentialToNativeHistogram(pt, temporality)
annots.Merge(ws) annots.Merge(ws)
if err != nil { if err != nil {
return annots, err return annots, err
@ -76,7 +77,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
// exponentialToNativeHistogram translates an OTel Exponential Histogram data point // exponentialToNativeHistogram translates an OTel Exponential Histogram data point
// to a Prometheus Native Histogram. // to a Prometheus Native Histogram.
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) { func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) {
var annots annotations.Annotations var annots annotations.Annotations
scale := p.Scale() scale := p.Scale()
if scale < -4 { if scale < -4 {
@ -94,7 +95,6 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom
pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true) pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true)
nSpans, nDeltas := convertBucketsLayout(p.Negative().BucketCounts().AsRaw(), p.Negative().Offset(), scaleDown, true) nSpans, nDeltas := convertBucketsLayout(p.Negative().BucketCounts().AsRaw(), p.Negative().Offset(), scaleDown, true)
h := prompb.Histogram{
// The counter reset detection must be compatible with Prometheus to // The counter reset detection must be compatible with Prometheus to
// safely set ResetHint to NO. This is not ensured currently. // safely set ResetHint to NO. This is not ensured currently.
// Sending a sample that triggers counter reset but with ResetHint==NO // Sending a sample that triggers counter reset but with ResetHint==NO
@ -104,7 +104,18 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom
// need to know here if it was used for the detection. // need to know here if it was used for the detection.
// Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303
// Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232
ResetHint: prompb.Histogram_UNKNOWN, resetHint := prompb.Histogram_UNKNOWN
if temporality == pmetric.AggregationTemporalityDelta {
// If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting.
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/).
// This might be changed to a different hint name as gauge type might be misleading for samples that should be
// summed over time.
resetHint = prompb.Histogram_GAUGE
}
h := prompb.Histogram{
ResetHint: resetHint,
Schema: scale, Schema: scale,
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()}, ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()},
@ -242,6 +253,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
resource pcommon.Resource, settings Settings, promName string, resource pcommon.Resource, settings Settings, promName string,
temporality pmetric.AggregationTemporality,
) (annotations.Annotations, error) { ) (annotations.Annotations, error) {
var annots annotations.Annotations var annots annotations.Annotations
@ -252,7 +264,7 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
pt := dataPoints.At(x) pt := dataPoints.At(x)
histogram, ws, err := explicitHistogramToCustomBucketsHistogram(pt) histogram, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality)
annots.Merge(ws) annots.Merge(ws)
if err != nil { if err != nil {
return annots, err return annots, err
@ -281,7 +293,7 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
return annots, nil return annots, nil
} }
func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) { func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) {
var annots annotations.Annotations var annots annotations.Annotations
buckets := p.BucketCounts().AsRaw() buckets := p.BucketCounts().AsRaw()
@ -289,8 +301,6 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint) (pr
bucketCounts := buckets[offset:] bucketCounts := buckets[offset:]
positiveSpans, positiveDeltas := convertBucketsLayout(bucketCounts, int32(offset), 0, false) positiveSpans, positiveDeltas := convertBucketsLayout(bucketCounts, int32(offset), 0, false)
// TODO(carrieedwards): Add setting to limit maximum bucket count
h := prompb.Histogram{
// The counter reset detection must be compatible with Prometheus to // The counter reset detection must be compatible with Prometheus to
// safely set ResetHint to NO. This is not ensured currently. // safely set ResetHint to NO. This is not ensured currently.
// Sending a sample that triggers counter reset but with ResetHint==NO // Sending a sample that triggers counter reset but with ResetHint==NO
@ -300,7 +310,19 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint) (pr
// need to know here if it was used for the detection. // need to know here if it was used for the detection.
// Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303
// Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232
ResetHint: prompb.Histogram_UNKNOWN, resetHint := prompb.Histogram_UNKNOWN
if temporality == pmetric.AggregationTemporalityDelta {
// If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting.
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/).
// This might be changed to a different hint name as gauge type might be misleading for samples that should be
// summed over time.
resetHint = prompb.Histogram_GAUGE
}
// TODO(carrieedwards): Add setting to limit maximum bucket count
h := prompb.Histogram{
ResetHint: resetHint,
Schema: histogram.CustomBucketsSchema, Schema: histogram.CustomBucketsSchema,
PositiveSpans: positiveSpans, PositiveSpans: positiveSpans,

View File

@ -566,7 +566,7 @@ func TestExponentialToNativeHistogram(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
validateExponentialHistogramCount(t, tt.exponentialHist()) // Sanity check. validateExponentialHistogramCount(t, tt.exponentialHist()) // Sanity check.
got, annots, err := exponentialToNativeHistogram(tt.exponentialHist()) got, annots, err := exponentialToNativeHistogram(tt.exponentialHist(), pmetric.AggregationTemporalityCumulative)
if tt.wantErrMessage != "" { if tt.wantErrMessage != "" {
require.ErrorContains(t, err, tt.wantErrMessage) require.ErrorContains(t, err, tt.wantErrMessage)
return return
@ -769,6 +769,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
ExportCreatedMetric: true, ExportCreatedMetric: true,
}, },
otlptranslator.BuildCompliantMetricName(metric, "", true), otlptranslator.BuildCompliantMetricName(metric, "", true),
pmetric.AggregationTemporalityCumulative,
) )
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, annots) require.Empty(t, annots)
@ -972,7 +973,7 @@ func TestHistogramToCustomBucketsHistogram(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
validateHistogramCount(t, tt.hist()) validateHistogramCount(t, tt.hist())
got, annots, err := explicitHistogramToCustomBucketsHistogram(tt.hist()) got, annots, err := explicitHistogramToCustomBucketsHistogram(tt.hist(), pmetric.AggregationTemporalityCumulative)
if tt.wantErrMessage != "" { if tt.wantErrMessage != "" {
require.ErrorContains(t, err, tt.wantErrMessage) require.ErrorContains(t, err, tt.wantErrMessage)
return return
@ -1137,6 +1138,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
ConvertHistogramsToNHCB: true, ConvertHistogramsToNHCB: true,
}, },
otlptranslator.BuildCompliantMetricName(metric, "", true), otlptranslator.BuildCompliantMetricName(metric, "", true),
pmetric.AggregationTemporalityCumulative,
) )
require.NoError(t, err) require.NoError(t, err)

View File

@ -41,6 +41,7 @@ type Settings struct {
PromoteResourceAttributes []string PromoteResourceAttributes []string
KeepIdentifyingResourceAttributes bool KeepIdentifyingResourceAttributes bool
ConvertHistogramsToNHCB bool ConvertHistogramsToNHCB bool
AllowDeltaTemporality bool
} }
// PrometheusConverter converts from OTel write format to Prometheus remote write format. // PrometheusConverter converts from OTel write format to Prometheus remote write format.
@ -91,8 +92,18 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
metric := metricSlice.At(k) metric := metricSlice.At(k)
mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric))
temporality, hasTemporality, err := aggregationTemporality(metric)
if err != nil {
errs = multierr.Append(errs, err)
continue
}
if !isValidAggregationTemporality(metric) { if hasTemporality &&
// Cumulative temporality is always valid.
// Delta temporality is also valid if AllowDeltaTemporality is true.
// All other temporality values are invalid.
!(temporality == pmetric.AggregationTemporalityCumulative ||
(settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) {
errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name())) errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
continue continue
} }
@ -144,7 +155,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
break break
} }
if settings.ConvertHistogramsToNHCB { if settings.ConvertHistogramsToNHCB {
ws, err := c.addCustomBucketsHistogramDataPoints(ctx, dataPoints, resource, settings, promName) ws, err := c.addCustomBucketsHistogramDataPoints(ctx, dataPoints, resource, settings, promName, temporality)
annots.Merge(ws) annots.Merge(ws)
if err != nil { if err != nil {
errs = multierr.Append(errs, err) errs = multierr.Append(errs, err)
@ -172,6 +183,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
resource, resource,
settings, settings,
promName, promName,
temporality,
) )
annots.Merge(ws) annots.Merge(ws)
if err != nil { if err != nil {

View File

@ -19,6 +19,7 @@ package prometheusremotewrite
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"testing" "testing"
"time" "time"
@ -31,6 +32,7 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/util/testutil"
) )
func TestFromMetrics(t *testing.T) { func TestFromMetrics(t *testing.T) {
@ -235,6 +237,461 @@ func TestFromMetrics(t *testing.T) {
}) })
} }
func TestTemporality(t *testing.T) {
ts := time.Unix(100, 0)
tests := []struct {
name string
allowDelta bool
convertToNHCB bool
inputSeries []pmetric.Metric
expectedSeries []prompb.TimeSeries
expectedError string
}{
{
name: "all cumulative when delta not allowed",
allowDelta: false,
inputSeries: []pmetric.Metric{
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromFloatSeries("test_metric_1", ts),
createPromFloatSeries("test_metric_2", ts),
},
},
{
name: "all delta when allowed",
allowDelta: true,
inputSeries: []pmetric.Metric{
createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromFloatSeries("test_metric_1", ts),
createPromFloatSeries("test_metric_2", ts),
},
},
{
name: "mixed temporality when delta allowed",
allowDelta: true,
inputSeries: []pmetric.Metric{
createOtelSum("test_metric_1", pmetric.AggregationTemporalityDelta, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromFloatSeries("test_metric_1", ts),
createPromFloatSeries("test_metric_2", ts),
},
},
{
name: "delta rejected when not allowed",
allowDelta: false,
inputSeries: []pmetric.Metric{
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityDelta, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromFloatSeries("test_metric_1", ts),
},
expectedError: `invalid temporality and type combination for metric "test_metric_2"`,
},
{
name: "unspecified temporality not allowed",
allowDelta: true,
inputSeries: []pmetric.Metric{
createOtelSum("test_metric_1", pmetric.AggregationTemporalityCumulative, ts),
createOtelSum("test_metric_2", pmetric.AggregationTemporalityUnspecified, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromFloatSeries("test_metric_1", ts),
},
expectedError: `invalid temporality and type combination for metric "test_metric_2"`,
},
{
name: "cumulative histogram",
allowDelta: false,
inputSeries: []pmetric.Metric{
createOtelExponentialHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromNativeHistogramSeries("test_histogram", prompb.Histogram_UNKNOWN, ts),
},
},
{
name: "delta histogram when allowed",
allowDelta: true,
inputSeries: []pmetric.Metric{
createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromNativeHistogramSeries("test_histogram_1", prompb.Histogram_GAUGE, ts),
createPromNativeHistogramSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts),
},
},
{
name: "delta histogram when not allowed",
allowDelta: false,
inputSeries: []pmetric.Metric{
createOtelExponentialHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExponentialHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromNativeHistogramSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts),
},
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`,
},
{
name: "cumulative histogram with buckets",
allowDelta: false,
convertToNHCB: true,
inputSeries: []pmetric.Metric{
createOtelExplicitHistogram("test_histogram", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromNHCBSeries("test_histogram", prompb.Histogram_UNKNOWN, ts),
},
},
{
name: "delta histogram with buckets when allowed",
allowDelta: true,
convertToNHCB: true,
inputSeries: []pmetric.Metric{
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromNHCBSeries("test_histogram_1", prompb.Histogram_GAUGE, ts),
createPromNHCBSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts),
},
},
{
name: "delta histogram with buckets when not allowed",
allowDelta: false,
convertToNHCB: true,
inputSeries: []pmetric.Metric{
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: []prompb.TimeSeries{
createPromNHCBSeries("test_histogram_2", prompb.Histogram_UNKNOWN, ts),
},
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`,
},
{
name: "delta histogram with buckets and convertToNHCB=false when not allowed",
allowDelta: false,
convertToNHCB: false,
inputSeries: []pmetric.Metric{
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: createPromClassicHistogramSeries("test_histogram_2", ts),
expectedError: `invalid temporality and type combination for metric "test_histogram_1"`,
},
{
name: "delta histogram with buckets and convertToNHCB=false when allowed",
allowDelta: true,
convertToNHCB: false,
inputSeries: []pmetric.Metric{
createOtelExplicitHistogram("test_histogram_1", pmetric.AggregationTemporalityDelta, ts),
createOtelExplicitHistogram("test_histogram_2", pmetric.AggregationTemporalityCumulative, ts),
},
expectedSeries: append(
createPromClassicHistogramSeries("test_histogram_1", ts),
createPromClassicHistogramSeries("test_histogram_2", ts)...,
),
},
{
name: "summary does not have temporality",
inputSeries: []pmetric.Metric{
createOtelSummary("test_summary_1", ts),
},
expectedSeries: createPromSummarySeries("test_summary_1", ts),
},
{
name: "gauge does not have temporality",
inputSeries: []pmetric.Metric{
createOtelGauge("test_gauge_1", ts),
},
expectedSeries: []prompb.TimeSeries{
createPromFloatSeries("test_gauge_1", ts),
},
},
{
name: "empty metric type errors",
inputSeries: []pmetric.Metric{
createOtelEmptyType("test_empty"),
},
expectedSeries: []prompb.TimeSeries{},
expectedError: `could not get aggregation temporality for test_empty as it has unsupported metric type Empty`,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
metrics := pmetric.NewMetrics()
rm := metrics.ResourceMetrics().AppendEmpty()
sm := rm.ScopeMetrics().AppendEmpty()
for _, s := range tc.inputSeries {
s.CopyTo(sm.Metrics().AppendEmpty())
}
c := NewPrometheusConverter()
settings := Settings{
AllowDeltaTemporality: tc.allowDelta,
ConvertHistogramsToNHCB: tc.convertToNHCB,
}
_, err := c.FromMetrics(context.Background(), metrics, settings)
if tc.expectedError != "" {
require.EqualError(t, err, tc.expectedError)
} else {
require.NoError(t, err)
}
series := c.TimeSeries()
// Sort series to make the test deterministic.
testutil.RequireEqual(t, sortTimeSeries(tc.expectedSeries), sortTimeSeries(series))
})
}
}
func createOtelSum(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric {
metrics := pmetric.NewMetricSlice()
m := metrics.AppendEmpty()
m.SetName(name)
sum := m.SetEmptySum()
sum.SetAggregationTemporality(temporality)
dp := sum.DataPoints().AppendEmpty()
dp.SetDoubleValue(5)
dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
dp.Attributes().PutStr("test_label", "test_value")
return m
}
func createPromFloatSeries(name string, ts time.Time) prompb.TimeSeries {
return prompb.TimeSeries{
Labels: []prompb.Label{
{Name: "__name__", Value: name},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{
Value: 5,
Timestamp: ts.UnixMilli(),
}},
}
}
func createOtelGauge(name string, ts time.Time) pmetric.Metric {
metrics := pmetric.NewMetricSlice()
m := metrics.AppendEmpty()
m.SetName(name)
gauge := m.SetEmptyGauge()
dp := gauge.DataPoints().AppendEmpty()
dp.SetDoubleValue(5)
dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
dp.Attributes().PutStr("test_label", "test_value")
return m
}
func createOtelExponentialHistogram(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric {
metrics := pmetric.NewMetricSlice()
m := metrics.AppendEmpty()
m.SetName(name)
hist := m.SetEmptyExponentialHistogram()
hist.SetAggregationTemporality(temporality)
dp := hist.DataPoints().AppendEmpty()
dp.SetCount(1)
dp.SetSum(5)
dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
dp.Attributes().PutStr("test_label", "test_value")
return m
}
func createPromNativeHistogramSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries {
return prompb.TimeSeries{
Labels: []prompb.Label{
{Name: "__name__", Value: name},
{Name: "test_label", Value: "test_value"},
},
Histograms: []prompb.Histogram{
{
Count: &prompb.Histogram_CountInt{CountInt: 1},
Sum: 5,
Schema: 0,
ZeroThreshold: 1e-128,
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
Timestamp: ts.UnixMilli(),
ResetHint: hint,
},
},
}
}
func createOtelExplicitHistogram(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric {
metrics := pmetric.NewMetricSlice()
m := metrics.AppendEmpty()
m.SetName(name)
hist := m.SetEmptyHistogram()
hist.SetAggregationTemporality(temporality)
dp := hist.DataPoints().AppendEmpty()
dp.SetCount(20)
dp.SetSum(30)
dp.BucketCounts().FromRaw([]uint64{10, 10, 0})
dp.ExplicitBounds().FromRaw([]float64{1, 2})
dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
dp.Attributes().PutStr("test_label", "test_value")
return m
}
func createPromNHCBSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries {
return prompb.TimeSeries{
Labels: []prompb.Label{
{Name: "__name__", Value: name},
{Name: "test_label", Value: "test_value"},
},
Histograms: []prompb.Histogram{
{
Count: &prompb.Histogram_CountInt{CountInt: 20},
Sum: 30,
Schema: -53,
ZeroThreshold: 0,
ZeroCount: nil,
PositiveSpans: []prompb.BucketSpan{
{
Length: 3,
},
},
PositiveDeltas: []int64{10, 0, -10},
CustomValues: []float64{1, 2},
Timestamp: ts.UnixMilli(),
ResetHint: hint,
},
},
}
}
func createPromClassicHistogramSeries(name string, ts time.Time) []prompb.TimeSeries {
return []prompb.TimeSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: name + "_bucket"},
{Name: "le", Value: "1"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{Value: 10, Timestamp: ts.UnixMilli()}},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: name + "_bucket"},
{Name: "le", Value: "2"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: name + "_bucket"},
{Name: "le", Value: "+Inf"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: name + "_count"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{Value: 20, Timestamp: ts.UnixMilli()}},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: name + "_sum"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{Value: 30, Timestamp: ts.UnixMilli()}},
},
}
}
func createOtelSummary(name string, ts time.Time) pmetric.Metric {
metrics := pmetric.NewMetricSlice()
m := metrics.AppendEmpty()
m.SetName(name)
summary := m.SetEmptySummary()
dp := summary.DataPoints().AppendEmpty()
dp.SetCount(9)
dp.SetSum(18)
qv := dp.QuantileValues().AppendEmpty()
qv.SetQuantile(0.5)
qv.SetValue(2)
dp.SetTimestamp(pcommon.NewTimestampFromTime(ts))
dp.Attributes().PutStr("test_label", "test_value")
return m
}
func createPromSummarySeries(name string, ts time.Time) []prompb.TimeSeries {
return []prompb.TimeSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: name + "_sum"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{
Value: 18,
Timestamp: ts.UnixMilli(),
}},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: name + "_count"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{
Value: 9,
Timestamp: ts.UnixMilli(),
}},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: name},
{Name: "quantile", Value: "0.5"},
{Name: "test_label", Value: "test_value"},
},
Samples: []prompb.Sample{{
Value: 2,
Timestamp: ts.UnixMilli(),
}},
},
}
}
func createOtelEmptyType(name string) pmetric.Metric {
metrics := pmetric.NewMetricSlice()
m := metrics.AppendEmpty()
m.SetName(name)
return m
}
func sortTimeSeries(series []prompb.TimeSeries) []prompb.TimeSeries {
for i := range series {
sort.Slice(series[i].Labels, func(j, k int) bool {
return series[i].Labels[j].Name < series[i].Labels[k].Name
})
}
sort.Slice(series, func(i, j int) bool {
return fmt.Sprint(series[i].Labels) < fmt.Sprint(series[j].Labels)
})
return series
}
func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
for _, resourceAttributeCount := range []int{0, 5, 50} { for _, resourceAttributeCount := range []int{0, 5, 50} {
b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) { b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) {

View File

@ -31,12 +31,27 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMeta
if otelMetric.Sum().IsMonotonic() { if otelMetric.Sum().IsMonotonic() {
metricType = prompb.MetricMetadata_COUNTER metricType = prompb.MetricMetadata_COUNTER
} }
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
if otelMetric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
metricType = prompb.MetricMetadata_UNKNOWN
}
return metricType return metricType
case pmetric.MetricTypeHistogram: case pmetric.MetricTypeHistogram:
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
if otelMetric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
return prompb.MetricMetadata_UNKNOWN
}
return prompb.MetricMetadata_HISTOGRAM return prompb.MetricMetadata_HISTOGRAM
case pmetric.MetricTypeSummary: case pmetric.MetricTypeSummary:
return prompb.MetricMetadata_SUMMARY return prompb.MetricMetadata_SUMMARY
case pmetric.MetricTypeExponentialHistogram: case pmetric.MetricTypeExponentialHistogram:
if otelMetric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
return prompb.MetricMetadata_UNKNOWN
}
return prompb.MetricMetadata_HISTOGRAM return prompb.MetricMetadata_HISTOGRAM
} }
return prompb.MetricMetadata_UNKNOWN return prompb.MetricMetadata_UNKNOWN

View File

@ -1669,7 +1669,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
if err != nil { if err != nil {
s.qm.logger.Error("non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) s.qm.logger.Error("non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err)
} else if sampleDiff+exemplarDiff+histogramDiff > 0 { } else if sampleDiff+exemplarDiff+histogramDiff > 0 {
s.qm.logger.Error("we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) s.qm.logger.Error("we got 2xx status code from the Receiver yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff)
} }
// These counters are used to calculate the dynamic sharding, and as such // These counters are used to calculate the dynamic sharding, and as such

View File

@ -526,20 +526,30 @@ func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref stora
type OTLPOptions struct { type OTLPOptions struct {
// Convert delta samples to their cumulative equivalent by aggregating in-memory // Convert delta samples to their cumulative equivalent by aggregating in-memory
ConvertDelta bool ConvertDelta bool
// Store the raw delta samples as metrics with unknown type (we don't have a proper type for delta yet, therefore
// marking the metric type as unknown for now).
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
NativeDelta bool
} }
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable. // writes them to the provided appendable.
func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
if opts.NativeDelta && opts.ConvertDelta {
// This should be validated when iterating through feature flags, so not expected to fail here.
panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
}
ex := &rwExporter{ ex := &rwExporter{
writeHandler: &writeHandler{ writeHandler: &writeHandler{
logger: logger, logger: logger,
appendable: appendable, appendable: appendable,
}, },
config: configFunc, config: configFunc,
allowDeltaTemporality: opts.NativeDelta,
} }
wh := &otlpWriteHandler{logger: logger, cumul: ex} wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
if opts.ConvertDelta { if opts.ConvertDelta {
fac := deltatocumulative.NewFactory() fac := deltatocumulative.NewFactory()
@ -547,7 +557,7 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl
ID: component.NewID(fac.Type()), ID: component.NewID(fac.Type()),
TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()}, TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()},
} }
d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.cumul) d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.defaultConsumer)
if err != nil { if err != nil {
// fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor], // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor],
// which only errors if: // which only errors if:
@ -563,7 +573,7 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl
// deltatocumulative does not error on start. see above for panic reasoning // deltatocumulative does not error on start. see above for panic reasoning
panic(err) panic(err)
} }
wh.delta = d2c wh.d2cConsumer = d2c
} }
return wh return wh
@ -572,6 +582,7 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl
type rwExporter struct { type rwExporter struct {
*writeHandler *writeHandler
config func() config.Config config func() config.Config
allowDeltaTemporality bool
} }
func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
@ -579,11 +590,12 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er
converter := otlptranslator.NewPrometheusConverter() converter := otlptranslator.NewPrometheusConverter()
annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
AddMetricSuffixes: true, AddMetricSuffixes: otlpCfg.TranslationStrategy != config.NoTranslation,
AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, AllowUTF8: otlpCfg.TranslationStrategy != config.UnderscoreEscapingWithSuffixes,
PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB, ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
AllowDeltaTemporality: rw.allowDeltaTemporality,
}) })
if err != nil { if err != nil {
rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err)
@@ -607,8 +619,8 @@ func (rw *rwExporter) Capabilities() consumer.Capabilities {
 type otlpWriteHandler struct {
 	logger *slog.Logger
-	cumul consumer.Metrics // only cumulative
-	delta consumer.Metrics // delta capable
+	defaultConsumer consumer.Metrics // stores deltas as-is
+	d2cConsumer     consumer.Metrics // converts deltas to cumulative
 }

 func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -620,13 +632,15 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	md := req.Metrics()

-	// if delta conversion enabled AND delta samples exist, use slower delta capable path
-	if h.delta != nil && hasDelta(md) {
-		err = h.delta.ConsumeMetrics(r.Context(), md)
+	// If deltatocumulative conversion enabled AND delta samples exist, use slower conversion path.
+	// While deltatocumulative can also accept cumulative metrics (and then just forwards them as-is), it currently
+	// holds a sync.Mutex when entering ConsumeMetrics. This is slow and not necessary when ingesting cumulative metrics.
+	if h.d2cConsumer != nil && hasDelta(md) {
+		err = h.d2cConsumer.ConsumeMetrics(r.Context(), md)
 	} else {
-		// deltatocumulative currently holds a sync.Mutex when entering ConsumeMetrics.
-		// This is slow and not necessary when no delta samples exist anyways
-		err = h.cumul.ConsumeMetrics(r.Context(), md)
+		// Otherwise use default consumer (alongside cumulative samples, this will accept delta samples and write as-is
+		// if native-delta-support is enabled).
+		err = h.defaultConsumer.ConsumeMetrics(r.Context(), md)
 	}
 	switch {
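hasDelta itself is not part of this hunk. A minimal sketch of such a check, assuming only the standard accessors of go.opentelemetry.io/collector/pdata/pmetric (the function name and exact shape are illustrative, not the upstream implementation):

package remote

import "go.opentelemetry.io/collector/pdata/pmetric"

// hasDeltaSketch returns true on the first delta-temporality sum, histogram
// or exponential histogram it finds; gauges and summaries carry no
// temporality and are skipped.
func hasDeltaSketch(md pmetric.Metrics) bool {
	for i := 0; i < md.ResourceMetrics().Len(); i++ {
		sms := md.ResourceMetrics().At(i).ScopeMetrics()
		for j := 0; j < sms.Len(); j++ {
			ms := sms.At(j).Metrics()
			for k := 0; k < ms.Len(); k++ {
				m := ms.At(k)
				switch m.Type() {
				case pmetric.MetricTypeSum:
					if m.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
						return true
					}
				case pmetric.MetricTypeHistogram:
					if m.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
						return true
					}
				case pmetric.MetricTypeExponentialHistogram:
					if m.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
						return true
					}
				}
			}
		}
	}
	return false
}

Because the scan returns on the first delta point, a purely cumulative payload pays only one full pass and then takes the mutex-free default path.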

View File

@@ -382,7 +382,118 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
 func TestOTLPWriteHandler(t *testing.T) {
 	exportRequest := generateOTLPWriteRequest()
+	timestamp := time.Now()
+
+	for _, testCase := range []struct {
+		name            string
+		otlpCfg         config.OTLPConfig
+		expectedSamples []mockSample
+	}{
+		{
+			name: "NoTranslation",
+			otlpCfg: config.OTLPConfig{
+				TranslationStrategy: config.NoTranslation,
+			},
+			expectedSamples: []mockSample{
+				{
+					l: labels.New(labels.Label{Name: "__name__", Value: "test.counter"},
+						labels.Label{Name: "foo.bar", Value: "baz"},
+						labels.Label{Name: "instance", Value: "test-instance"},
+						labels.Label{Name: "job", Value: "test-service"}),
+					t: timestamp.UnixMilli(),
+					v: 10.0,
+				},
+				{
+					l: labels.New(
+						labels.Label{Name: "__name__", Value: "target_info"},
+						labels.Label{Name: "host.name", Value: "test-host"},
+						labels.Label{Name: "instance", Value: "test-instance"},
+						labels.Label{Name: "job", Value: "test-service"},
+					),
+					t: timestamp.UnixMilli(),
+					v: 1,
+				},
+			},
+		},
+		{
+			name: "UnderscoreEscapingWithSuffixes",
+			otlpCfg: config.OTLPConfig{
+				TranslationStrategy: config.UnderscoreEscapingWithSuffixes,
+			},
+			expectedSamples: []mockSample{
+				{
+					l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_total"},
+						labels.Label{Name: "foo_bar", Value: "baz"},
+						labels.Label{Name: "instance", Value: "test-instance"},
+						labels.Label{Name: "job", Value: "test-service"}),
+					t: timestamp.UnixMilli(),
+					v: 10.0,
+				},
+				{
+					l: labels.New(
+						labels.Label{Name: "__name__", Value: "target_info"},
+						labels.Label{Name: "host_name", Value: "test-host"},
+						labels.Label{Name: "instance", Value: "test-instance"},
+						labels.Label{Name: "job", Value: "test-service"},
+					),
+					t: timestamp.UnixMilli(),
+					v: 1,
+				},
+			},
+		},
+		{
+			name: "NoUTF8EscapingWithSuffixes",
+			otlpCfg: config.OTLPConfig{
+				TranslationStrategy: config.NoUTF8EscapingWithSuffixes,
+			},
+			expectedSamples: []mockSample{
+				{
+					l: labels.New(labels.Label{Name: "__name__", Value: "test.counter_total"},
+						labels.Label{Name: "foo.bar", Value: "baz"},
+						labels.Label{Name: "instance", Value: "test-instance"},
+						labels.Label{Name: "job", Value: "test-service"}),
+					t: timestamp.UnixMilli(),
+					v: 10.0,
+				},
+				{
+					l: labels.New(
+						labels.Label{Name: "__name__", Value: "target_info"},
+						labels.Label{Name: "host.name", Value: "test-host"},
+						labels.Label{Name: "instance", Value: "test-instance"},
+						labels.Label{Name: "job", Value: "test-service"},
+					),
+					t: timestamp.UnixMilli(),
+					v: 1,
+				},
+			},
+		},
+	} {
+		t.Run(testCase.name, func(t *testing.T) {
+			appendable := handleOTLP(t, exportRequest, testCase.otlpCfg)
+			for _, sample := range testCase.expectedSamples {
+				requireContainsSample(t, appendable.samples, sample)
+			}
+			require.Len(t, appendable.samples, 12)   // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
+			require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
+			require.Len(t, appendable.exemplars, 1)  // 1 (exemplar)
+		})
+	}
+}
+
+func requireContainsSample(t *testing.T, actual []mockSample, expected mockSample) {
+	t.Helper()
+
+	for _, got := range actual {
+		if labels.Equal(expected.l, got.l) && expected.t == got.t && expected.v == got.v {
+			return
+		}
+	}
+	require.Fail(t, fmt.Sprintf("Sample not found: \n"+
+		"expected: %v\n"+
+		"actual  : %v", expected, actual))
+}
+
+func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig) *mockAppendable {
 	buf, err := exportRequest.MarshalProto()
 	require.NoError(t, err)
@@ -393,19 +504,16 @@ func TestOTLPWriteHandler(t *testing.T) {
 	appendable := &mockAppendable{}
 	handler := NewOTLPWriteHandler(nil, nil, appendable, func() config.Config {
 		return config.Config{
-			OTLPConfig: config.DefaultOTLPConfig,
+			OTLPConfig: otlpCfg,
 		}
 	}, OTLPOptions{})

 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)

 	resp := recorder.Result()
 	require.Equal(t, http.StatusOK, resp.StatusCode)
-	require.Len(t, appendable.samples, 12)   // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
-	require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
-	require.Len(t, appendable.exemplars, 1)  // 1 (exemplar)
+	return appendable
 }

 func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
@@ -426,7 +534,7 @@ func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
 	// Generate One Counter
 	counterMetric := scopeMetric.Metrics().AppendEmpty()
-	counterMetric.SetName("test-counter")
+	counterMetric.SetName("test.counter")
 	counterMetric.SetDescription("test-counter-description")
 	counterMetric.SetEmptySum()
 	counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
@@ -446,7 +554,7 @@ func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
 	// Generate One Gauge
 	gaugeMetric := scopeMetric.Metrics().AppendEmpty()
-	gaugeMetric.SetName("test-gauge")
+	gaugeMetric.SetName("test.gauge")
 	gaugeMetric.SetDescription("test-gauge-description")
 	gaugeMetric.SetEmptyGauge()
@@ -457,7 +565,7 @@ func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
 	// Generate One Histogram
 	histogramMetric := scopeMetric.Metrics().AppendEmpty()
-	histogramMetric.SetName("test-histogram")
+	histogramMetric.SetName("test.histogram")
 	histogramMetric.SetDescription("test-histogram-description")
 	histogramMetric.SetEmptyHistogram()
 	histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
@@ -472,7 +580,7 @@ func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
 	// Generate One Exponential-Histogram
 	exponentialHistogramMetric := scopeMetric.Metrics().AppendEmpty()
-	exponentialHistogramMetric.SetName("test-exponential-histogram")
+	exponentialHistogramMetric.SetName("test.exponential.histogram")
 	exponentialHistogramMetric.SetDescription("test-exponential-histogram-description")
 	exponentialHistogramMetric.SetEmptyExponentialHistogram()
 	exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)

View File

@@ -236,7 +236,8 @@ type DB struct {
 	appenderPool sync.Pool
 	bufPool      sync.Pool

-	// These pools are used during WAL replay.
+	// These pools are only used during WAL replay and are reset at the end.
+	// NOTE: Adjust resetWALReplayResources() upon changes to the pools.
 	walReplaySeriesPool          zeropool.Pool[[]record.RefSeries]
 	walReplaySamplesPool         zeropool.Pool[[]record.RefSample]
 	walReplayHistogramsPool      zeropool.Pool[[]record.RefHistogramSample]
@@ -366,6 +367,7 @@ func validateOptions(opts *Options) *Options {
 func (db *DB) replayWAL() error {
 	db.logger.Info("replaying WAL, this may take a while", "dir", db.wal.Dir())
+	defer db.resetWALReplayResources()
 	start := time.Now()

 	dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir())
@@ -425,6 +427,13 @@ func (db *DB) replayWAL() error {
 	return nil
 }

+func (db *DB) resetWALReplayResources() {
+	db.walReplaySeriesPool = zeropool.Pool[[]record.RefSeries]{}
+	db.walReplaySamplesPool = zeropool.Pool[[]record.RefSample]{}
+	db.walReplayHistogramsPool = zeropool.Pool[[]record.RefHistogramSample]{}
+	db.walReplayFloatHistogramsPool = zeropool.Pool[[]record.RefFloatHistogramSample]{}
+}
+
 func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) {
 	var (
 		syms = labels.NewSymbolTable() // One table for the whole WAL.
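Why the reset matters: a zeropool.Pool hangs on to the buffers handed back to it, so after replay it would otherwise keep the largest slices it ever saw alive for the life of the process. A rough sketch of the replay-time pattern these fields support (names and shape illustrative, not the upstream loadWAL code):

package agent

import (
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/util/zeropool"
)

var samplesPool zeropool.Pool[[]record.RefSample]

// decodeSamplesSketch decodes one WAL record worth of samples, reusing a
// pooled buffer across records. Replacing the pool with a fresh zero value
// afterwards (as resetWALReplayResources does) lets the GC reclaim every
// buffer it retained.
func decodeSamplesSketch(dec *record.Decoder, rec []byte) ([]record.RefSample, error) {
	samples := samplesPool.Get() // may be nil/empty on first use
	samples, err := dec.Samples(rec, samples[:0])
	if err != nil {
		return nil, err
	}
	// The caller processes samples, then recycles the buffer:
	// samplesPool.Put(samples)
	return samples, nil
}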

View File

@@ -1486,7 +1486,7 @@ func (db *DB) compactBlocks() (err error) {
 		// long enough that we end up with a HEAD block that needs to be written.
 		// Check if that's the case and stop compactions early.
 		if db.head.compactable() && !db.waitingForCompactionDelay() {
-			db.logger.Warn("aborting block compactions to persit the head block")
+			db.logger.Warn("aborting block compactions to persist the head block")
 			return nil
 		}

View File

@@ -5041,7 +5041,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
 	// Verify that the in-memory ooo chunk is empty.
 	checkEmptyOOOChunk := func(lbls labels.Labels) {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Nil(t, ms.ooo)
@@ -5085,7 +5085,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
 	// Verify that the in-memory ooo chunk is not empty.
 	checkNonEmptyOOOChunk := func(lbls labels.Labels) {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
@@ -5246,7 +5246,7 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
 	// Checking that ooo chunk is not empty.
 	for _, lbls := range []labels.Labels{series1, series2} {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
@@ -5274,7 +5274,7 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
 	// Checking that ooo chunk is empty.
 	for _, lbls := range []labels.Labels{series1, series2} {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Nil(t, ms.ooo)
@@ -5357,7 +5357,7 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
 	// Checking that ooo chunk is not empty.
 	for _, lbls := range []labels.Labels{series1, series2} {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
@@ -5385,7 +5385,7 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
 	// Checking that ooo chunk is empty.
 	for _, lbls := range []labels.Labels{series1, series2} {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Nil(t, ms.ooo)
@@ -5467,7 +5467,7 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
 	// Checking that there are some ooo m-map chunks.
 	for _, lbls := range []labels.Labels{series1, series2} {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Len(t, ms.ooo.oooMmappedChunks, 2)
@@ -5486,7 +5486,7 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
 	// Check ooo m-map chunks again.
 	for _, lbls := range []labels.Labels{series1, series2} {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Len(t, ms.ooo.oooMmappedChunks, 2)
@@ -5526,7 +5526,7 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
 	// Checking that ooo chunk is empty in Head.
 	for _, lbls := range []labels.Labels{series1, series2} {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Nil(t, ms.ooo)
@@ -6835,7 +6835,7 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) {
 	_, err = os.ReadDir(path.Join(db.Dir(), wlog.WblDirName))
 	require.True(t, os.IsNotExist(err))

-	ms, created, err := db.head.getOrCreate(s1.Hash(), s1)
+	ms, created, err := db.head.getOrCreate(s1.Hash(), s1, false)
 	require.NoError(t, err)
 	require.False(t, created)
 	require.NotNil(t, ms)
@@ -6908,7 +6908,7 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
 	oooMint, oooMaxt := minutes(195), minutes(260)

 	// Collect the samples only present in the ooo m-map chunks.
-	ms, created, err := db.head.getOrCreate(s1.Hash(), s1)
+	ms, created, err := db.head.getOrCreate(s1.Hash(), s1, false)
 	require.False(t, created)
 	require.NoError(t, err)
 	var s1MmapSamples []chunks.Sample
@@ -7088,7 +7088,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
 	// Verify that the in-memory ooo chunk is empty.
 	checkEmptyOOOChunk := func(lbls labels.Labels) {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Nil(t, ms.ooo)
@@ -7270,7 +7270,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
 	// Verify that the in-memory ooo chunk is not empty.
 	checkNonEmptyOOOChunk := func(lbls labels.Labels) {
-		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
@@ -7594,7 +7594,7 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
 	require.Len(t, db.Blocks(), 3)

 	// Check that the ooo chunks were removed.
-	ms, created, err := db.head.getOrCreate(series1.Hash(), series1)
+	ms, created, err := db.head.getOrCreate(series1.Hash(), series1, false)
 	require.NoError(t, err)
 	require.False(t, created)
 	require.Nil(t, ms.ooo)

View File

@@ -93,7 +93,8 @@ type Head struct {
 	bytesPool    zeropool.Pool[[]byte]
 	memChunkPool sync.Pool

-	// These pools are used during WAL/WBL replay.
+	// These pools are only used during WAL/WBL replay and are reset at the end.
+	// NOTE: Adjust resetWLReplayResources() upon changes to the pools.
 	wlReplaySeriesPool  zeropool.Pool[[]record.RefSeries]
 	wlReplaySamplesPool zeropool.Pool[[]record.RefSample]
 	wlReplaytStonesPool zeropool.Pool[[]tombstones.Stone]
@@ -345,6 +346,17 @@ func (h *Head) resetInMemoryState() error {
 	return nil
 }

+func (h *Head) resetWLReplayResources() {
+	h.wlReplaySeriesPool = zeropool.Pool[[]record.RefSeries]{}
+	h.wlReplaySamplesPool = zeropool.Pool[[]record.RefSample]{}
+	h.wlReplaytStonesPool = zeropool.Pool[[]tombstones.Stone]{}
+	h.wlReplayExemplarsPool = zeropool.Pool[[]record.RefExemplar]{}
+	h.wlReplayHistogramsPool = zeropool.Pool[[]record.RefHistogramSample]{}
+	h.wlReplayFloatHistogramsPool = zeropool.Pool[[]record.RefFloatHistogramSample]{}
+	h.wlReplayMetadataPool = zeropool.Pool[[]record.RefMetadata]{}
+	h.wlReplayMmapMarkersPool = zeropool.Pool[[]record.RefMmapMarker]{}
+}
+
 type headMetrics struct {
 	activeAppenders prometheus.Gauge
 	series          prometheus.GaugeFunc
@@ -629,6 +641,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
 // limits the ingested samples to the head min valid time.
 func (h *Head) Init(minValidTime int64) error {
 	h.minValidTime.Store(minValidTime)
+	defer h.resetWLReplayResources()
 	defer func() {
 		h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
 	}()
@@ -1721,7 +1734,7 @@ func (h *Head) String() string {
 	return "head"
 }

-func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, error) {
+func (h *Head) getOrCreate(hash uint64, lset labels.Labels, pendingCommit bool) (*memSeries, bool, error) {
 	// Just using `getOrCreateWithID` below would be semantically sufficient, but we'd create
 	// a new series on every sample inserted via Add(), which causes allocations
 	// and makes our series IDs rather random and harder to compress in postings.
@@ -1733,17 +1746,17 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e
 	// Optimistically assume that we are the first one to create the series.
 	id := chunks.HeadSeriesRef(h.lastSeriesID.Inc())

-	return h.getOrCreateWithID(id, hash, lset)
+	return h.getOrCreateWithID(id, hash, lset, pendingCommit)
 }

-func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) {
+func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels, pendingCommit bool) (*memSeries, bool, error) {
 	s, created, err := h.series.getOrSet(hash, lset, func() *memSeries {
 		shardHash := uint64(0)
 		if h.opts.EnableSharding {
 			shardHash = labels.StableHash(lset)
 		}
-		return newMemSeries(lset, id, shardHash, h.opts.IsolationDisabled)
+		return newMemSeries(lset, id, shardHash, h.opts.IsolationDisabled, pendingCommit)
 	})
 	if err != nil {
 		return nil, false, err
@@ -2184,12 +2197,13 @@ type memSeriesOOOFields struct {
 	firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0].
 }

-func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled bool) *memSeries {
+func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled, pendingCommit bool) *memSeries {
 	s := &memSeries{
 		lset:      lset,
 		ref:       id,
 		nextAt:    math.MinInt64,
 		shardHash: shardHash,
+		pendingCommit: pendingCommit,
 	}
 	if !isolationDisabled {
 		s.txs = newTxRing(0)

View File

@@ -319,7 +319,8 @@ type headAppender struct {
 	headMaxt      int64 // We track it here to not take the lock for every sample appended.
 	oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.

-	series       []record.RefSeries // New series held by this appender.
+	seriesRefs   []record.RefSeries // New series records held by this appender.
+	series       []*memSeries       // New series held by this appender (using corresponding slice indexes from seriesRefs).
 	samples      []record.RefSample // New float samples held by this appender.
 	sampleSeries []*memSeries       // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
 	histograms   []record.RefHistogramSample // New histogram samples held by this appender.
@@ -461,15 +462,16 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo
 	if l, dup := lset.HasDuplicateLabelNames(); dup {
 		return nil, false, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample)
 	}
-	s, created, err = a.head.getOrCreate(lset.Hash(), lset)
+	s, created, err = a.head.getOrCreate(lset.Hash(), lset, true)
 	if err != nil {
 		return nil, false, err
 	}
 	if created {
-		a.series = append(a.series, record.RefSeries{
+		a.seriesRefs = append(a.seriesRefs, record.RefSeries{
 			Ref:    s.ref,
 			Labels: lset,
 		})
+		a.series = append(a.series, s)
 	}
 	return s, created, nil
 }
@@ -907,8 +909,8 @@ func (a *headAppender) log() error {
 	var rec []byte
 	var enc record.Encoder

-	if len(a.series) > 0 {
-		rec = enc.Series(a.series, buf)
+	if len(a.seriesRefs) > 0 {
+		rec = enc.Series(a.seriesRefs, buf)
 		buf = rec[:0]

 		if err := a.head.wal.Log(rec); err != nil {
@@ -1426,6 +1428,14 @@ func (a *headAppender) commitMetadata() {
 	}
 }

+func (a *headAppender) unmarkCreatedSeriesAsPendingCommit() {
+	for _, s := range a.series {
+		s.Lock()
+		s.pendingCommit = false
+		s.Unlock()
+	}
+}
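The flag closes a race between appenders and head garbage collection: a series created for an in-flight append holds no chunks yet, so without the marker a concurrent gc could drop it before Commit writes its samples (TestHead_RaceBetweenSeriesCreationAndGC further down exercises exactly this). A condensed sketch of the keep/drop decision the flag feeds into (the field set here is illustrative, not the literal gc code):

// A head series may only be dropped if it holds no data in any form AND
// no appender has it marked as pending commit.
func collectableSketch(s *memSeries) bool {
	s.Lock()
	defer s.Unlock()
	return len(s.mmappedChunks) == 0 && s.headChunks == nil &&
		s.ooo == nil && !s.pendingCommit
}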
 // Commit writes to the WAL and adds the data to the Head.
 // TODO(codesome): Refactor this method to reduce indentation and make it more readable.
 func (a *headAppender) Commit() (err error) {
@@ -1479,6 +1489,8 @@ func (a *headAppender) Commit() (err error) {
 	a.commitHistograms(acc)
 	a.commitFloatHistograms(acc)
 	a.commitMetadata()
+	// Unmark all series as pending commit after all samples have been committed.
+	a.unmarkCreatedSeriesAsPendingCommit()

 	a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected))
 	a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected))
@@ -1952,6 +1964,7 @@ func (a *headAppender) Rollback() (err error) {
 	defer a.head.metrics.activeAppenders.Dec()
 	defer a.head.iso.closeAppend(a.appendID)
 	defer a.head.putSeriesBuffer(a.sampleSeries)
+	defer a.unmarkCreatedSeriesAsPendingCommit()

 	var series *memSeries
 	for i := range a.samples {

View File

@@ -43,7 +43,7 @@ func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
 	defer h.Close()

 	for i := 0; i < b.N; i++ {
-		h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)))
+		h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)), false)
 	}
 }
@@ -62,7 +62,7 @@ func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) {
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			i := count.Inc()
-			h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(int(i))))
+			h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(int(i))), false)
 		}
 	})
 }
@@ -82,7 +82,7 @@ func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) {
 	defer h.Close()

 	for i := 0; i < b.N; i++ {
-		h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)))
+		h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)), false)
 	}
 }

View File

@@ -382,7 +382,7 @@ func TestMemSeries_chunk(t *testing.T) {
 		require.NoError(t, chunkDiskMapper.Close())
 	}()

-	series := newMemSeries(labels.EmptyLabels(), 1, 0, true)
+	series := newMemSeries(labels.EmptyLabels(), 1, 0, true, false)

 	if tc.setup != nil {
 		tc.setup(t, series, chunkDiskMapper)

View File

@@ -102,7 +102,7 @@ func BenchmarkCreateSeries(b *testing.B) {
 	b.ResetTimer()

 	for _, s := range series {
-		h.getOrCreate(s.Labels().Hash(), s.Labels())
+		h.getOrCreate(s.Labels().Hash(), s.Labels(), false)
 	}
@@ -149,24 +149,29 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
 	}
 }

-func populateTestWL(t testing.TB, w *wlog.WL, recs []interface{}) {
+func populateTestWL(t testing.TB, w *wlog.WL, recs []interface{}, buf []byte) []byte {
 	var enc record.Encoder
 	for _, r := range recs {
+		buf = buf[:0]
 		switch v := r.(type) {
 		case []record.RefSeries:
-			require.NoError(t, w.Log(enc.Series(v, nil)))
+			buf = enc.Series(v, buf)
 		case []record.RefSample:
-			require.NoError(t, w.Log(enc.Samples(v, nil)))
+			buf = enc.Samples(v, buf)
 		case []tombstones.Stone:
-			require.NoError(t, w.Log(enc.Tombstones(v, nil)))
+			buf = enc.Tombstones(v, buf)
 		case []record.RefExemplar:
-			require.NoError(t, w.Log(enc.Exemplars(v, nil)))
+			buf = enc.Exemplars(v, buf)
 		case []record.RefMmapMarker:
-			require.NoError(t, w.Log(enc.MmapMarkers(v, nil)))
+			buf = enc.MmapMarkers(v, buf)
 		case []record.RefMetadata:
-			require.NoError(t, w.Log(enc.Metadata(v, nil)))
+			buf = enc.Metadata(v, buf)
+		default:
+			continue
 		}
+		require.NoError(t, w.Log(buf))
 	}
+	return buf
 }
 func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
@@ -309,15 +314,16 @@ func BenchmarkLoadWLs(b *testing.B) {
 			// Write series.
 			refSeries := make([]record.RefSeries, 0, c.seriesPerBatch)
+			var buf []byte
+			builder := labels.NewBuilder(labels.EmptyLabels())
+			for j := 1; j < labelsPerSeries; j++ {
+				builder.Set(defaultLabelName+strconv.Itoa(j), defaultLabelValue+strconv.Itoa(j))
+			}
 			for k := 0; k < c.batches; k++ {
 				refSeries = refSeries[:0]
 				for i := k * c.seriesPerBatch; i < (k+1)*c.seriesPerBatch; i++ {
-					lbls := make(map[string]string, labelsPerSeries)
-					lbls[defaultLabelName] = strconv.Itoa(i)
-					for j := 1; len(lbls) < labelsPerSeries; j++ {
-						lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j)
-					}
-					refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: labels.FromMap(lbls)})
+					builder.Set(defaultLabelName, strconv.Itoa(i))
+					refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: builder.Labels()})
 				}

 				writeSeries := refSeries
@@ -333,7 +339,7 @@ func BenchmarkLoadWLs(b *testing.B) {
 					writeSeries = newWriteSeries
 				}

-				populateTestWL(b, wal, []interface{}{writeSeries})
+				buf = populateTestWL(b, wal, []interface{}{writeSeries}, buf)
 			}

 			// Write samples.
@@ -359,7 +365,7 @@ func BenchmarkLoadWLs(b *testing.B) {
 						V:   float64(i) * 100,
 					})
 				}
-				populateTestWL(b, wal, []interface{}{refSamples})
+				buf = populateTestWL(b, wal, []interface{}{refSamples}, buf)
 			}
 		}
@@ -374,7 +380,7 @@ func BenchmarkLoadWLs(b *testing.B) {
 			}
 			for k := 0; k < c.batches*c.seriesPerBatch; k++ {
 				// Create one mmapped chunk per series, with one sample at the given time.
-				s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, 0, defaultIsolationDisabled)
+				s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, 0, defaultIsolationDisabled, false)
 				s.append(c.mmappedChunkT, 42, 0, cOpts)
 				// There's only one head chunk because only a single sample is appended. mmapChunks()
 				// ignores the latest chunk, so we need to cut a new head chunk to guarantee the chunk with
@@ -398,7 +404,7 @@ func BenchmarkLoadWLs(b *testing.B) {
 					Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)),
 				})
 			}
-			populateTestWL(b, wal, []interface{}{refExemplars})
+			buf = populateTestWL(b, wal, []interface{}{refExemplars}, buf)
 		}
 	}
@@ -427,10 +433,10 @@ func BenchmarkLoadWLs(b *testing.B) {
 				})
 			}
 			if shouldAddMarkers {
-				populateTestWL(b, wbl, []interface{}{refMarkers})
+				populateTestWL(b, wbl, []interface{}{refMarkers}, buf)
 			}
-			populateTestWL(b, wal, []interface{}{refSamples})
-			populateTestWL(b, wbl, []interface{}{refSamples})
+			buf = populateTestWL(b, wal, []interface{}{refSamples}, buf)
+			buf = populateTestWL(b, wbl, []interface{}{refSamples}, buf)
 		}
 	}
@@ -739,7 +745,7 @@ func TestHead_ReadWAL(t *testing.T) {
 				require.NoError(t, head.Close())
 			}()

-			populateTestWL(t, w, entries)
+			populateTestWL(t, w, entries, nil)

 			require.NoError(t, head.Init(math.MinInt64))
 			require.Equal(t, uint64(101), head.lastSeriesID.Load())
@@ -895,7 +901,7 @@ func TestHead_KeepSeriesInWALCheckpoint(t *testing.T) {
 		{
 			name: "keep series still in the head",
 			prepare: func(t *testing.T, h *Head) {
-				_, _, err := h.getOrCreateWithID(chunks.HeadSeriesRef(existingRef), existingLbls.Hash(), existingLbls)
+				_, _, err := h.getOrCreateWithID(chunks.HeadSeriesRef(existingRef), existingLbls.Hash(), existingLbls, false)
 				require.NoError(t, err)
 			},
 			seriesRef: chunks.HeadSeriesRef(existingRef),
@@ -971,6 +977,86 @@ func TestHead_ActiveAppenders(t *testing.T) {
 	require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
 }
+func TestHead_RaceBetweenSeriesCreationAndGC(t *testing.T) {
+	head, _ := newTestHead(t, 1000, compression.None, false)
+	t.Cleanup(func() { _ = head.Close() })
+	require.NoError(t, head.Init(0))
+
+	const totalSeries = 100_000
+	series := make([]labels.Labels, totalSeries)
+	for i := 0; i < totalSeries; i++ {
+		series[i] = labels.FromStrings("foo", strconv.Itoa(i))
+	}
+	done := atomic.NewBool(false)
+	go func() {
+		defer done.Store(true)
+		app := head.Appender(context.Background())
+		defer func() {
+			if err := app.Commit(); err != nil {
+				t.Errorf("Failed to commit: %v", err)
+			}
+		}()
+		for i := 0; i < totalSeries; i++ {
+			_, err := app.Append(0, series[i], 100, 1)
+			if err != nil {
+				t.Errorf("Failed to append: %v", err)
+				return
+			}
+		}
+	}()
+
+	// Don't check the atomic.Bool on all iterations in order to perform more gc iterations and make the race condition more likely.
+	for i := 1; i%128 != 0 || !done.Load(); i++ {
+		head.gc()
+	}
+
+	require.Equal(t, totalSeries, int(head.NumSeries()))
+}
+
+func TestHead_CanGarbagecollectSeriesCreatedWithoutSamples(t *testing.T) {
+	for op, finishTxn := range map[string]func(app storage.Appender) error{
+		"after commit":   func(app storage.Appender) error { return app.Commit() },
+		"after rollback": func(app storage.Appender) error { return app.Rollback() },
+	} {
+		t.Run(op, func(t *testing.T) {
+			chunkRange := time.Hour.Milliseconds()
+			head, _ := newTestHead(t, chunkRange, compression.None, true)
+			t.Cleanup(func() { _ = head.Close() })
+			require.NoError(t, head.Init(0))
+
+			firstSampleTime := 10 * chunkRange
+			{
+				// Append first sample, it should init head max time to firstSampleTime.
+				app := head.Appender(context.Background())
+				_, err := app.Append(0, labels.FromStrings("lbl", "ok"), firstSampleTime, 1)
+				require.NoError(t, err)
+				require.NoError(t, app.Commit())
+				require.Equal(t, 1, int(head.NumSeries()))
+			}
+
+			// Append a sample in a time range that is not covered by the chunk range;
+			// this creates the series first and then fails to append the sample.
+			app := head.Appender(context.Background())
+			invalidSampleTime := firstSampleTime - chunkRange
+			_, err := app.Append(0, labels.FromStrings("foo", "bar"), invalidSampleTime, 2)
+			require.Error(t, err)
+			// These are our assumptions: we're not testing them, we're just checking them to make debugging a failed
+			// test easier if someone refactors the code and breaks these assumptions.
+			// If these assumptions fail after a refactor, feel free to remove them but make sure that the test is still what we intended to test.
+			require.NotErrorIs(t, err, storage.ErrOutOfBounds, "Failed to append sample shouldn't take the shortcut that returns storage.ErrOutOfBounds")
+			require.ErrorIs(t, err, storage.ErrTooOldSample, "Failed to append sample should return storage.ErrTooOldSample, because OOO window was enabled but this sample doesn't fall into it.")
+
+			// Do commit or rollback, depending on what we're testing.
+			require.NoError(t, finishTxn(app))
+
+			// Garbage-collect; since we finished the transaction and the series has no samples, it should be collectable.
+			head.gc()
+			require.Equal(t, 1, int(head.NumSeries()))
+		})
+	}
+}
+
 func TestHead_UnknownWALRecord(t *testing.T) {
 	head, w := newTestHead(t, 1000, compression.None, false)
 	w.Log([]byte{255, 42})
@@ -1025,7 +1111,7 @@ func BenchmarkHead_Truncate(b *testing.B) {
 		}
 		allSeries[i] = labels.FromStrings(append(nameValues, "first", "a", "second", "a", "third", "a")...)
-		s, _, _ := h.getOrCreate(allSeries[i].Hash(), allSeries[i])
+		s, _, _ := h.getOrCreate(allSeries[i].Hash(), allSeries[i], false)
 		s.mmappedChunks = []*mmappedChunk{
 			{minTime: 1000 * int64(i/churn), maxTime: 999 + 1000*int64(i/churn)},
 		}
@@ -1062,10 +1148,10 @@ func TestHead_Truncate(t *testing.T) {

 	ctx := context.Background()

-	s1, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1", "b", "1"))
-	s2, _, _ := h.getOrCreate(2, labels.FromStrings("a", "2", "b", "1"))
-	s3, _, _ := h.getOrCreate(3, labels.FromStrings("a", "1", "b", "2"))
-	s4, _, _ := h.getOrCreate(4, labels.FromStrings("a", "2", "b", "2", "c", "1"))
+	s1, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1", "b", "1"), false)
+	s2, _, _ := h.getOrCreate(2, labels.FromStrings("a", "2", "b", "1"), false)
+	s3, _, _ := h.getOrCreate(3, labels.FromStrings("a", "1", "b", "2"), false)
+	s4, _, _ := h.getOrCreate(4, labels.FromStrings("a", "2", "b", "2", "c", "1"), false)

 	s1.mmappedChunks = []*mmappedChunk{
 		{minTime: 0, maxTime: 999},
@@ -1162,7 +1248,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
 		},
 	}

-	s := newMemSeries(labels.FromStrings("a", "b"), 1, 0, defaultIsolationDisabled)
+	s := newMemSeries(labels.FromStrings("a", "b"), 1, 0, defaultIsolationDisabled, false)

 	for i := 0; i < 4000; i += 5 {
 		ok, _ := s.append(int64(i), float64(i), 0, cOpts)
@@ -1303,7 +1389,7 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) {
 			require.NoError(t, chunkDiskMapper.Close())
 		}()

-		series := newMemSeries(labels.EmptyLabels(), 1, 0, true)
+		series := newMemSeries(labels.EmptyLabels(), 1, 0, true, false)

 		cOpts := chunkOpts{
 			chunkDiskMapper: chunkDiskMapper,
@@ -1383,7 +1469,7 @@ func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
 				require.NoError(t, head.Close())
 			}()

-			populateTestWL(t, w, entries)
+			populateTestWL(t, w, entries, nil)

 			require.NoError(t, head.Init(math.MinInt64))
@@ -1877,7 +1963,7 @@ func TestMemSeries_append(t *testing.T) {
 		samplesPerChunk: DefaultSamplesPerChunk,
 	}

-	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled)
+	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled, false)

 	// Add first two samples at the very end of a chunk range and the next two
 	// on and after it.
@@ -1938,7 +2024,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
 		samplesPerChunk: DefaultSamplesPerChunk,
 	}

-	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled)
+	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled, false)

 	histograms := tsdbutil.GenerateTestHistograms(4)
 	histogramWithOneMoreBucket := histograms[3].Copy()
@@ -2000,7 +2086,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) {
 		samplesPerChunk: samplesPerChunk,
 	}

-	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled)
+	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled, false)

 	// At this slow rate, we will fill the chunk in two block durations.
 	slowRate := (DefaultBlockDuration * 2) / samplesPerChunk
@@ -2051,7 +2137,7 @@ func TestGCChunkAccess(t *testing.T) {
 	h.initTime(0)

-	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
+	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"), false)

 	// Appending 2 samples for the first chunk.
 	ok, chunkCreated := s.append(0, 0, 0, cOpts)
@@ -2110,7 +2196,7 @@ func TestGCSeriesAccess(t *testing.T) {
 	h.initTime(0)

-	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
+	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"), false)

 	// Appending 2 samples for the first chunk.
 	ok, chunkCreated := s.append(0, 0, 0, cOpts)
@@ -2463,7 +2549,7 @@ func TestHeadReadWriterRepair(t *testing.T) {
 		samplesPerChunk: DefaultSamplesPerChunk,
 	}

-	s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
+	s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1"), false)
 	require.True(t, created, "series was not created")

 	for i := 0; i < 7; i++ {
@@ -2823,7 +2909,7 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) {
 		samplesPerChunk: DefaultSamplesPerChunk,
 	}

-	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"))
+	s, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1"), false)

 	ok, _ := s.append(0, 0, 0, cOpts)
 	require.True(t, ok, "Series append failed.")
@@ -3406,7 +3492,7 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
 		samplesPerChunk: DefaultSamplesPerChunk,
 	}

-	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled)
+	s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled, false)

 	for i := 0; i < 7; i++ {
 		ok, _ := s.append(int64(i), float64(i), 0, cOpts)
@@ -4716,7 +4802,7 @@ func TestHistogramCounterResetHeader(t *testing.T) {
 			checkExpCounterResetHeader := func(newHeaders ...chunkenc.CounterResetHeader) {
 				expHeaders = append(expHeaders, newHeaders...)

-				ms, _, err := head.getOrCreate(l.Hash(), l)
+				ms, _, err := head.getOrCreate(l.Hash(), l, false)
 				require.NoError(t, err)
 				ms.mmapChunks(head.chunkDiskMapper)
 				require.Len(t, ms.mmappedChunks, len(expHeaders)-1) // One is the head chunk.
@@ -4843,7 +4929,7 @@ func TestOOOHistogramCounterResetHeaders(t *testing.T) {
 			checkOOOExpCounterResetHeader := func(newChunks ...expOOOMmappedChunks) {
 				expChunks = append(expChunks, newChunks...)

-				ms, _, err := head.getOrCreate(l.Hash(), l)
+				ms, _, err := head.getOrCreate(l.Hash(), l, false)
 				require.NoError(t, err)

 				require.Len(t, ms.ooo.oooMmappedChunks, len(expChunks))
@@ -4986,7 +5072,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
 	var expResult []chunks.Sample
 	checkExpChunks := func(count int) {
-		ms, created, err := db.Head().getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := db.Head().getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.NotNil(t, ms)
@@ -5290,7 +5376,7 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
 	require.NoError(t, h.Init(0)) // Replay happens here.

 	// Get the ooo samples from the Head.
-	ms, ok, err := h.getOrCreate(l.Hash(), l)
+	ms, ok, err := h.getOrCreate(l.Hash(), l, false)
 	require.NoError(t, err)
 	require.False(t, ok)
 	require.NotNil(t, ms)
@@ -5359,7 +5445,7 @@ func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
 		appendSample(mins)
 	}

-	ms, ok, err := h.getOrCreate(l.Hash(), l)
+	ms, ok, err := h.getOrCreate(l.Hash(), l, false)
 	require.NoError(t, err)
 	require.False(t, ok)
 	require.NotNil(t, ms)
@@ -5387,7 +5473,7 @@ func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
 	require.NoError(t, h.Init(0)) // Replay happens here.

 	// Get the mmap chunks from the Head.
-	ms, ok, err = h.getOrCreate(l.Hash(), l)
+	ms, ok, err = h.getOrCreate(l.Hash(), l, false)
 	require.NoError(t, err)
 	require.False(t, ok)
 	require.NotNil(t, ms)
@@ -5442,7 +5528,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
 	require.NoError(t, app.Commit())
 	require.Greater(t, prom_testutil.ToFloat64(h.metrics.chunksCreated), 4.0)

-	series, created, err := h.getOrCreate(seriesLabels.Hash(), seriesLabels)
+	series, created, err := h.getOrCreate(seriesLabels.Hash(), seriesLabels, false)
 	require.NoError(t, err)
 	require.False(t, created, "should already exist")
 	require.NotNil(t, series, "should return the series we created above")
@@ -5459,7 +5545,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, h.Init(0))

-	series, created, err = h.getOrCreate(seriesLabels.Hash(), seriesLabels)
+	series, created, err = h.getOrCreate(seriesLabels.Hash(), seriesLabels, false)
 	require.NoError(t, err)
 	require.False(t, created, "should already exist")
 	require.NotNil(t, series, "should return the series we created above")
@@ -5656,7 +5742,7 @@ func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Ap
 	}

 	verifyOOOSamples := func(lbls labels.Labels, expSamples int) {
-		ms, created, err := h.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := h.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.NotNil(t, ms)
@@ -5667,7 +5753,7 @@ func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Ap
 	}

 	verifyInOrderSamples := func(lbls labels.Labels, expSamples int) {
-		ms, created, err := h.getOrCreate(lbls.Hash(), lbls)
+		ms, created, err := h.getOrCreate(lbls.Hash(), lbls, false)
 		require.NoError(t, err)
 		require.False(t, created)
 		require.NotNil(t, ms)
@@ -5795,7 +5881,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) {
 	checkHeaders := func() {
 		head.mmapHeadChunks()
-		ms, _, err := head.getOrCreate(l.Hash(), l)
+		ms, _, err := head.getOrCreate(l.Hash(), l, false)
 		require.NoError(t, err)
 		require.Len(t, ms.mmappedChunks, 3)
 		expHeaders := []chunkenc.CounterResetHeader{
@@ -5870,7 +5956,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) {
 	appendHistogram(hists[4])

 	checkHeaders := func() {
-		ms, _, err := head.getOrCreate(l.Hash(), l)
+		ms, _, err := head.getOrCreate(l.Hash(), l, false)
 		require.NoError(t, err)
 		head.mmapHeadChunks()
 		require.Len(t, ms.mmappedChunks, 3)

View File

@@ -254,7 +254,7 @@ Outer:
 		switch v := d.(type) {
 		case []record.RefSeries:
 			for _, walSeries := range v {
-				mSeries, created, err := h.getOrCreateWithID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels)
+				mSeries, created, err := h.getOrCreateWithID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels, false)
 				if err != nil {
 					seriesCreationErr = err
 					break Outer
@@ -1577,7 +1577,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
 			localRefSeries := shardedRefSeries[idx]
 			for csr := range rc {
-				series, _, err := h.getOrCreateWithID(csr.ref, csr.lset.Hash(), csr.lset)
+				series, _, err := h.getOrCreateWithID(csr.ref, csr.lset.Hash(), csr.lset, false)
 				if err != nil {
 					errChan <- err
 					return

View File

@@ -306,7 +306,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
 	}()
 	require.NoError(t, h.Init(0))

-	s1, _, _ := h.getOrCreate(s1ID, s1Lset)
+	s1, _, _ := h.getOrCreate(s1ID, s1Lset, false)
 	s1.ooo = &memSeriesOOOFields{}

 	// define our expected chunks, by looking at the expected ChunkIntervals and setting...

View File

@@ -262,7 +262,7 @@ func NewAPI(
 	statsRenderer StatsRenderer,
 	rwEnabled bool,
 	acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg,
-	otlpEnabled, otlpDeltaToCumulative bool,
+	otlpEnabled, otlpDeltaToCumulative, otlpNativeDeltaIngestion bool,
 	ctZeroIngestionEnabled bool,
 ) *API {
 	a := &API{
@@ -310,7 +310,7 @@ func NewAPI(
 		a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled)
 	}
 	if otlpEnabled {
-		a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ConvertDelta: otlpDeltaToCumulative})
+		a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ConvertDelta: otlpDeltaToCumulative, NativeDelta: otlpNativeDeltaIngestion})
 	}

 	return a

View File

@@ -144,6 +144,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route
 		false,
 		false,
 		false,
+		false,
 	)

 	promRouter := route.New().WithPrefix("/api/v1")

View File

@@ -1,7 +1,7 @@
 {
   "name": "@prometheus-io/mantine-ui",
   "private": true,
-  "version": "0.302.1",
+  "version": "0.303.0",
   "type": "module",
   "scripts": {
     "start": "vite",
@@ -12,62 +12,62 @@
     "test": "vitest"
   },
   "dependencies": {
-    "@codemirror/autocomplete": "^6.18.4",
+    "@codemirror/autocomplete": "^6.18.6",
-    "@codemirror/language": "^6.10.8",
+    "@codemirror/language": "^6.11.0",
-    "@codemirror/lint": "^6.8.4",
+    "@codemirror/lint": "^6.8.5",
     "@codemirror/state": "^6.5.2",
-    "@codemirror/view": "^6.36.4",
+    "@codemirror/view": "^6.36.6",
-    "@floating-ui/dom": "^1.6.12",
+    "@floating-ui/dom": "^1.6.13",
     "@lezer/common": "^1.2.3",
     "@lezer/highlight": "^1.2.1",
-    "@mantine/code-highlight": "^7.17.2",
+    "@mantine/code-highlight": "^7.17.5",
-    "@mantine/core": "^7.17.2",
+    "@mantine/core": "^7.17.5",
-    "@mantine/dates": "^7.17.2",
+    "@mantine/dates": "^7.17.5",
-    "@mantine/hooks": "^7.17.2",
+    "@mantine/hooks": "^7.17.5",
-    "@mantine/notifications": "^7.17.2",
+    "@mantine/notifications": "^7.17.5",
     "@microsoft/fetch-event-source": "^2.0.1",
     "@nexucis/fuzzy": "^0.5.1",
     "@nexucis/kvsearch": "^0.9.1",
-    "@prometheus-io/codemirror-promql": "0.302.1",
+    "@prometheus-io/codemirror-promql": "0.303.0",
-    "@reduxjs/toolkit": "^2.6.1",
+    "@reduxjs/toolkit": "^2.7.0",
     "@tabler/icons-react": "^3.31.0",
-    "@tanstack/react-query": "^5.67.1",
+    "@tanstack/react-query": "^5.74.7",
     "@testing-library/jest-dom": "^6.6.3",
-    "@testing-library/react": "^16.2.0",
+    "@testing-library/react": "^16.3.0",
     "@types/lodash": "^4.17.16",
-    "@types/sanitize-html": "^2.13.0",
+    "@types/sanitize-html": "^2.15.0",
-    "@uiw/react-codemirror": "^4.23.10",
+    "@uiw/react-codemirror": "^4.23.11",
     "clsx": "^2.1.1",
-    "dayjs": "^1.11.10",
+    "dayjs": "^1.11.13",
    "lodash": "^4.17.21",
-    "react": "^19.0.0",
+    "react": "^19.1.0",
-    "react-dom": "^19.0.0",
+    "react-dom": "^19.1.0",
     "react-infinite-scroll-component": "^6.1.0",
     "react-redux": "^9.2.0",
-    "react-router-dom": "^7.4.0",
+    "react-router-dom": "^7.5.2",
-    "sanitize-html": "^2.15.0",
+    "sanitize-html": "^2.16.0",
     "uplot": "^1.6.32",
     "uplot-react": "^1.2.2",
     "use-query-params": "^2.2.1"
   },
   "devDependencies": {
-    "@eslint/compat": "^1.2.4",
+    "@eslint/compat": "^1.2.8",
     "@eslint/eslintrc": "^3.3.1",
-    "@eslint/js": "^9.21.0",
+    "@eslint/js": "^9.25.1",
-    "@types/react": "^19.0.12",
+    "@types/react": "^19.1.2",
-    "@types/react-dom": "^19.0.4",
+    "@types/react-dom": "^19.1.2",
-    "@typescript-eslint/eslint-plugin": "^8.20.0",
+    "@typescript-eslint/eslint-plugin": "^8.31.0",
-    "@typescript-eslint/parser": "^8.25.0",
+    "@typescript-eslint/parser": "^8.31.0",
-    "@vitejs/plugin-react": "^4.3.4",
+    "@vitejs/plugin-react": "^4.4.1",
-    "eslint": "^9.23.0",
+    "eslint": "^9.25.1",
-    "eslint-plugin-react-hooks": "^5.1.0",
+    "eslint-plugin-react-hooks": "^5.2.0",
-    "eslint-plugin-react-refresh": "^0.4.19",
+    "eslint-plugin-react-refresh": "^0.4.20",
     "globals": "^16.0.0",
     "jsdom": "^25.0.1",
-    "postcss": "^8.4.47",
+    "postcss": "^8.5.3",
     "postcss-preset-mantine": "^1.17.0",
     "postcss-simple-vars": "^7.0.1",
-    "vite": "^6.2.4",
+    "vite": "^6.3.3",
-    "vitest": "^3.0.8"
+    "vitest": "^3.1.2"
   }
 }


@ -216,12 +216,12 @@ export default function AlertsPage() {
// convenient to have in the same file IMO). // convenient to have in the same file IMO).
const renderedPageItems = useMemo( const renderedPageItems = useMemo(
() => () =>
currentPageGroups.map((g, i) => ( currentPageGroups.map((g) => (
<Card <Card
shadow="xs" shadow="xs"
withBorder withBorder
p="md" p="md"
key={i} // TODO: Find a stable and definitely unique key. key={`${g.file}-${g.name}`}
> >
<Group mb="sm" justify="space-between"> <Group mb="sm" justify="space-between">
<Group align="baseline"> <Group align="baseline">


@ -86,12 +86,13 @@ export default function RulesPage() {
(effectiveActivePage - 1) * ruleGroupsPerPage, (effectiveActivePage - 1) * ruleGroupsPerPage,
effectiveActivePage * ruleGroupsPerPage effectiveActivePage * ruleGroupsPerPage
) )
.map((g, i) => ( .map((g) => (
<Card <Card
shadow="xs" shadow="xs"
withBorder withBorder
p="md" p="md"
key={i} // TODO: Find a stable and definitely unique key. mb="md"
key={`${g.file}-${g.name}`}
> >
<Group mb="sm" justify="space-between"> <Group mb="sm" justify="space-between">
<Group align="baseline"> <Group align="baseline">


@ -1266,9 +1266,11 @@ const funcDocs: Record<string, React.ReactNode> = {
</p> </p>
<p> <p>
<code>histogram_stddev(v instant-vector)</code> returns the estimated standard deviation of observations in a native <code>histogram_stddev(v instant-vector)</code> returns the estimated standard deviation of observations in a native
histogram, based on the geometric mean of the buckets where the observations lie. Samples that are not native histogram. For this estimation, all observations in a bucket are assumed to have the value of the mean of the bucket boundaries.
histograms are ignored and do not show up in the returned vector. For the zero bucket and for buckets with custom boundaries, the arithmetic mean is used. For the usual exponential buckets,
the geometric mean is used. Samples that are not native histograms are ignored and do not show up in the returned vector.
</p> </p>
<p> <p>


@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/codemirror-promql", "name": "@prometheus-io/codemirror-promql",
"version": "0.302.1", "version": "0.303.0",
"description": "a CodeMirror mode for the PromQL language", "description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts", "types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js", "module": "dist/esm/index.js",
@ -29,21 +29,21 @@
}, },
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.302.1", "@prometheus-io/lezer-promql": "0.303.0",
"lru-cache": "^11.0.2" "lru-cache": "^11.1.0"
}, },
"devDependencies": { "devDependencies": {
"@codemirror/autocomplete": "^6.18.4", "@codemirror/autocomplete": "^6.18.6",
"@codemirror/language": "^6.10.8", "@codemirror/language": "^6.11.0",
"@codemirror/lint": "^6.8.4", "@codemirror/lint": "^6.8.5",
"@codemirror/state": "^6.5.2", "@codemirror/state": "^6.5.2",
"@codemirror/view": "^6.36.4", "@codemirror/view": "^6.36.6",
"@lezer/common": "^1.2.3", "@lezer/common": "^1.2.3",
"@lezer/highlight": "^1.2.0", "@lezer/highlight": "^1.2.1",
"@lezer/lr": "^1.4.2", "@lezer/lr": "^1.4.2",
"eslint-plugin-prettier": "^5.2.3", "eslint-plugin-prettier": "^5.2.6",
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"nock": "^14.0.1" "nock": "^14.0.4"
}, },
"peerDependencies": { "peerDependencies": {
"@codemirror/autocomplete": "^6.4.0", "@codemirror/autocomplete": "^6.4.0",


@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/lezer-promql", "name": "@prometheus-io/lezer-promql",
"version": "0.302.1", "version": "0.303.0",
"description": "lezer-based PromQL grammar", "description": "lezer-based PromQL grammar",
"main": "dist/index.cjs", "main": "dist/index.cjs",
"type": "module", "type": "module",
@ -31,10 +31,10 @@
"test": "NODE_OPTIONS=--experimental-vm-modules jest" "test": "NODE_OPTIONS=--experimental-vm-modules jest"
}, },
"devDependencies": { "devDependencies": {
"@lezer/generator": "^1.7.2", "@lezer/generator": "^1.7.3",
"@lezer/highlight": "^1.2.0", "@lezer/highlight": "^1.2.1",
"@lezer/lr": "^1.4.2", "@lezer/lr": "^1.4.2",
"@rollup/plugin-node-resolve": "^16.0.0" "@rollup/plugin-node-resolve": "^16.0.1"
}, },
"peerDependencies": { "peerDependencies": {
"@lezer/highlight": "^1.1.2", "@lezer/highlight": "^1.1.2",

web/ui/package-lock.json (generated, 1237 lines changed): diff suppressed because it is too large.


@ -1,7 +1,7 @@
{ {
"name": "prometheus-io", "name": "prometheus-io",
"description": "Monorepo for the Prometheus UI", "description": "Monorepo for the Prometheus UI",
"version": "0.302.1", "version": "0.303.0",
"private": true, "private": true,
"scripts": { "scripts": {
"build": "bash build_ui.sh --all", "build": "bash build_ui.sh --all",
@ -16,12 +16,12 @@
], ],
"devDependencies": { "devDependencies": {
"@types/jest": "^29.5.14", "@types/jest": "^29.5.14",
"@typescript-eslint/eslint-plugin": "^8.20.0", "@typescript-eslint/eslint-plugin": "^8.31.0",
"@typescript-eslint/parser": "^8.25.0", "@typescript-eslint/parser": "^8.31.0",
"eslint-config-prettier": "^10.1.1", "eslint-config-prettier": "^10.1.2",
"prettier": "^3.4.2", "prettier": "^3.5.3",
"ts-jest": "^29.2.2", "ts-jest": "^29.3.2",
"typescript": "^5.7.2", "typescript": "^5.8.3",
"vite": "^6.2.4" "vite": "^6.3.3"
} }
} }


@ -9,12 +9,12 @@
"version": "0.300.1", "version": "0.300.1",
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.18.6", "@codemirror/autocomplete": "^6.18.6",
"@codemirror/commands": "^6.8.0", "@codemirror/commands": "^6.8.1",
"@codemirror/language": "^6.11.0", "@codemirror/language": "^6.11.0",
"@codemirror/lint": "^6.8.4", "@codemirror/lint": "^6.8.5",
"@codemirror/search": "^6.5.10", "@codemirror/search": "^6.5.10",
"@codemirror/state": "^6.5.2", "@codemirror/state": "^6.5.2",
"@codemirror/view": "^6.36.4", "@codemirror/view": "^6.36.6",
"@forevolve/bootstrap-dark": "^4.0.2", "@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.7.2", "@fortawesome/fontawesome-svg-core": "6.7.2",
"@fortawesome/free-solid-svg-icons": "6.7.2", "@fortawesome/free-solid-svg-icons": "6.7.2",
@ -24,16 +24,16 @@
"@lezer/lr": "^1.4.2", "@lezer/lr": "^1.4.2",
"@nexucis/fuzzy": "^0.5.1", "@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1", "@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.302.1", "@prometheus-io/codemirror-promql": "0.303.0",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^9.0.9", "downshift": "^9.0.9",
"http-proxy-middleware": "^3.0.3", "http-proxy-middleware": "^3.0.5",
"jquery": "^3.7.1", "jquery": "^3.7.1",
"jquery.flot.tooltip": "^0.9.0", "jquery.flot.tooltip": "^0.9.0",
"moment": "^2.30.1", "moment": "^2.30.1",
"moment-timezone": "^0.5.47", "moment-timezone": "^0.5.48",
"popper.js": "^1.14.3", "popper.js": "^1.16.1",
"react": "^17.0.2", "react": "^17.0.2",
"react-copy-to-clipboard": "^5.1.0", "react-copy-to-clipboard": "^5.1.0",
"react-dom": "^17.0.2", "react-dom": "^17.0.2",
@ -42,8 +42,8 @@
"react-router-dom": "^5.3.4", "react-router-dom": "^5.3.4",
"react-test-renderer": "^17.0.2", "react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1", "reactstrap": "^8.10.1",
"sanitize-html": "^2.15.0", "sanitize-html": "^2.16.0",
"sass": "1.86.0", "sass": "1.87.0",
"tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3" "tempusdominus-core": "^5.19.3"
}, },
@ -54,26 +54,26 @@
"@types/flot": "0.0.36", "@types/flot": "0.0.36",
"@types/jest": "^29.5.14", "@types/jest": "^29.5.14",
"@types/jquery": "^3.5.32", "@types/jquery": "^3.5.32",
"@types/node": "^22.13.11", "@types/node": "^22.15.2",
"@types/react": "^17.0.71", "@types/react": "^17.0.85",
"@types/react-copy-to-clipboard": "^5.0.7", "@types/react-copy-to-clipboard": "^5.0.7",
"@types/react-dom": "^17.0.25", "@types/react-dom": "^17.0.26",
"@types/react-router-dom": "^5.3.3", "@types/react-router-dom": "^5.3.3",
"@types/sanitize-html": "^2.13.0", "@types/sanitize-html": "^2.15.0",
"@types/sinon": "^17.0.4", "@types/sinon": "^17.0.4",
"@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0",
"enzyme": "^3.11.0", "enzyme": "^3.11.0",
"enzyme-to-json": "^3.6.2", "enzyme-to-json": "^3.6.2",
"eslint-config-prettier": "^10.1.1", "eslint-config-prettier": "^10.1.2",
"eslint-config-react-app": "^7.0.1", "eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^5.2.3", "eslint-plugin-prettier": "^5.2.6",
"jest-canvas-mock": "^2.5.2", "jest-canvas-mock": "^2.5.2",
"jest-fetch-mock": "^3.0.3", "jest-fetch-mock": "^3.0.3",
"mutationobserver-shim": "^0.3.7", "mutationobserver-shim": "^0.3.7",
"prettier": "^3.5.3", "prettier": "^3.5.3",
"react-scripts": "^5.0.1", "react-scripts": "^5.0.1",
"sinon": "^19.0.4", "sinon": "^19.0.5",
"ts-jest": "^29.2.6" "ts-jest": "^29.3.2"
}, },
"optionalDependencies": { "optionalDependencies": {
"fsevents": "^2.3.3" "fsevents": "^2.3.3"
@ -114,12 +114,14 @@
} }
}, },
"node_modules/@babel/code-frame": { "node_modules/@babel/code-frame": {
"version": "7.24.2", "version": "7.26.2",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.2.tgz", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz",
"integrity": "sha512-y5+tLQyV8pg3fsiln67BVLD1P13Eg4lh5RW9mF0zUuvLrv9uIQ4MCL+CRT+FTsBlBjcIan6PGsLcBN0m3ClUyQ==", "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": { "dependencies": {
"@babel/highlight": "^7.24.2", "@babel/helper-validator-identifier": "^7.25.9",
"js-tokens": "^4.0.0",
"picocolors": "^1.0.0" "picocolors": "^1.0.0"
}, },
"engines": { "engines": {
@ -526,19 +528,21 @@
} }
}, },
"node_modules/@babel/helper-string-parser": { "node_modules/@babel/helper-string-parser": {
"version": "7.24.1", "version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz", "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz",
"integrity": "sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ==", "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==",
"dev": true, "dev": true,
"license": "MIT",
"engines": { "engines": {
"node": ">=6.9.0" "node": ">=6.9.0"
} }
}, },
"node_modules/@babel/helper-validator-identifier": { "node_modules/@babel/helper-validator-identifier": {
"version": "7.22.20", "version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz",
"integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==",
"dev": true, "dev": true,
"license": "MIT",
"engines": { "engines": {
"node": ">=6.9.0" "node": ">=6.9.0"
} }
@ -567,39 +571,28 @@
} }
}, },
"node_modules/@babel/helpers": { "node_modules/@babel/helpers": {
"version": "7.24.4", "version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.4.tgz", "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz",
"integrity": "sha512-FewdlZbSiwaVGlgT1DPANDuCHaDMiOo+D/IDYRFYjHOuv66xMSJ7fQwwODwRNAPkADIO/z1EoF/l2BCWlWABDw==", "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": { "dependencies": {
"@babel/template": "^7.24.0", "@babel/template": "^7.27.0",
"@babel/traverse": "^7.24.1", "@babel/types": "^7.27.0"
"@babel/types": "^7.24.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/highlight": {
"version": "7.24.2",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.2.tgz",
"integrity": "sha512-Yac1ao4flkTxTteCDZLEvdxg2fZfz1v8M4QpaGypq/WPDqg3ijHYbDfs+LG5hvzSoqaSZ9/Z9lKSP3CjZjv+pA==",
"dev": true,
"dependencies": {
"@babel/helper-validator-identifier": "^7.22.20",
"chalk": "^2.4.2",
"js-tokens": "^4.0.0",
"picocolors": "^1.0.0"
}, },
"engines": { "engines": {
"node": ">=6.9.0" "node": ">=6.9.0"
} }
}, },
"node_modules/@babel/parser": { "node_modules/@babel/parser": {
"version": "7.24.4", "version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.4.tgz", "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz",
"integrity": "sha512-zTvEBcghmeBma9QIGunWevvBAp4/Qu9Bdq+2k0Ot4fVMD6v3dsC9WOcRSKk7tRRyBM/53yKMJko9xOatGQAwSg==", "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.27.0"
},
"bin": { "bin": {
"parser": "bin/babel-parser.js" "parser": "bin/babel-parser.js"
}, },
@ -2202,14 +2195,15 @@
} }
}, },
"node_modules/@babel/template": { "node_modules/@babel/template": {
"version": "7.24.0", "version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.0.tgz", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz",
"integrity": "sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==", "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": { "dependencies": {
"@babel/code-frame": "^7.23.5", "@babel/code-frame": "^7.26.2",
"@babel/parser": "^7.24.0", "@babel/parser": "^7.27.0",
"@babel/types": "^7.24.0" "@babel/types": "^7.27.0"
}, },
"engines": { "engines": {
"node": ">=6.9.0" "node": ">=6.9.0"
@ -2237,14 +2231,14 @@
} }
}, },
"node_modules/@babel/types": { "node_modules/@babel/types": {
"version": "7.24.0", "version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.0.tgz", "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz",
"integrity": "sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w==", "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": { "dependencies": {
"@babel/helper-string-parser": "^7.23.4", "@babel/helper-string-parser": "^7.25.9",
"@babel/helper-validator-identifier": "^7.22.20", "@babel/helper-validator-identifier": "^7.25.9"
"to-fast-properties": "^2.0.0"
}, },
"engines": { "engines": {
"node": ">=6.9.0" "node": ">=6.9.0"
@ -2269,9 +2263,9 @@
} }
}, },
"node_modules/@codemirror/commands": { "node_modules/@codemirror/commands": {
"version": "6.8.0", "version": "6.8.1",
"resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.8.0.tgz", "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.8.1.tgz",
"integrity": "sha512-q8VPEFaEP4ikSlt6ZxjB3zW72+7osfAYW9i8Zu943uqbKuz6utc1+F170hyLUCUltXORjQXRyYQNfkckzA/bPQ==", "integrity": "sha512-KlGVYufHMQzxbdQONiLyGQDUW0itrLZwq3CcY7xpv9ZLRHqzkBSoteocBHtMCoY7/Ci4xhzSrToIeLg7FxHuaw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@codemirror/language": "^6.0.0", "@codemirror/language": "^6.0.0",
@ -2295,9 +2289,9 @@
} }
}, },
"node_modules/@codemirror/lint": { "node_modules/@codemirror/lint": {
"version": "6.8.4", "version": "6.8.5",
"resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.4.tgz", "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.5.tgz",
"integrity": "sha512-u4q7PnZlJUojeRe8FJa/njJcMctISGgPQ4PnWsd9268R4ZTtU+tfFYmwkBvgcrK2+QQ8tYFVALVb5fVJykKc5A==", "integrity": "sha512-s3n3KisH7dx3vsoeGMxsbRAgKe4O1vbrnKBClm99PU0fWxmxsx5rR2PfqQgIt+2MMJBHbiJ5rfIdLYfB9NNvsA==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@codemirror/state": "^6.0.0", "@codemirror/state": "^6.0.0",
@ -2326,9 +2320,9 @@
} }
}, },
"node_modules/@codemirror/view": { "node_modules/@codemirror/view": {
"version": "6.36.4", "version": "6.36.6",
"resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.36.4.tgz", "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.36.6.tgz",
"integrity": "sha512-ZQ0V5ovw/miKEXTvjgzRyjnrk9TwriUB1k4R5p7uNnHR9Hus+D1SXHGdJshijEzPFjU25xea/7nhIeSqYFKdbA==", "integrity": "sha512-uxugGLet+Nzp0Jcit8Hn3LypM8ioMLKTsdf8FRoT3HWvZtb9GhaWMe0Cc15rz90Ljab4YFJiAulmIVB74OY0IQ==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@codemirror/state": "^6.5.0", "@codemirror/state": "^6.5.0",
@ -4611,16 +4605,16 @@
} }
}, },
"node_modules/@pkgr/core": { "node_modules/@pkgr/core": {
"version": "0.1.1", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.1.1.tgz", "resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.2.4.tgz",
"integrity": "sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==", "integrity": "sha512-ROFF39F6ZrnzSUEmQQZUar0Jt4xVoP9WnDRdWwF4NNcXs3xBTLgBUDoOwW141y1jP+S8nahIbdxbFC7IShw9Iw==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": "^12.20.0 || ^14.18.0 || >=16.0.0" "node": "^12.20.0 || ^14.18.0 || >=16.0.0"
}, },
"funding": { "funding": {
"url": "https://opencollective.com/unts" "url": "https://opencollective.com/pkgr"
} }
}, },
"node_modules/@pmmmwh/react-refresh-webpack-plugin": { "node_modules/@pmmmwh/react-refresh-webpack-plugin": {
@ -4674,12 +4668,12 @@
} }
}, },
"node_modules/@prometheus-io/codemirror-promql": { "node_modules/@prometheus-io/codemirror-promql": {
"version": "0.302.1", "version": "0.303.0",
"resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.302.1.tgz", "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.303.0.tgz",
"integrity": "sha512-u2uZbVKwz7UeJarE1LcOzbxiocetpgoqZ3ngs9HKOHG48i2dFUEXDfn4zs4dhuClQ/NixirmdGhSYq3l6b+9Yw==", "integrity": "sha512-qakS0MKnv/yBaX3tz2Lz3b3rCANpAyJhzzcI3AFxzyHWPdOUxZC64IkSjJG4SM1aLgRD0LAWH6xdqXVhIlCrlA==",
"license": "Apache-2.0", "license": "Apache-2.0",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.302.1", "@prometheus-io/lezer-promql": "0.303.0",
"lru-cache": "^11.0.2" "lru-cache": "^11.0.2"
}, },
"engines": { "engines": {
@ -4695,9 +4689,9 @@
} }
}, },
"node_modules/@prometheus-io/lezer-promql": { "node_modules/@prometheus-io/lezer-promql": {
"version": "0.302.1", "version": "0.303.0",
"resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.302.1.tgz", "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.303.0.tgz",
"integrity": "sha512-pxaWbzqwRXe+/wS6VWLcMSD23bgct56GQccqTWZIu9atmwTCMHWRBjGjCaHa5OpIoQYLAgKKClIFkVZdcW00Mw==", "integrity": "sha512-mO8vE85ft4S7fmw+oYgrzWatX3UCVQZkSoCfoATsxNaM8tgKdh1FkHMnux4rWjC3NNNnyQFXYOnUVcFisL2i0A==",
"license": "Apache-2.0", "license": "Apache-2.0",
"peerDependencies": { "peerDependencies": {
"@lezer/highlight": "^1.1.2", "@lezer/highlight": "^1.1.2",
@ -5426,12 +5420,12 @@
"dev": true "dev": true
}, },
"node_modules/@types/node": { "node_modules/@types/node": {
"version": "22.13.11", "version": "22.15.2",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.11.tgz", "resolved": "https://registry.npmjs.org/@types/node/-/node-22.15.2.tgz",
"integrity": "sha512-iEUCUJoU0i3VnrCmgoWCXttklWcvoCIx4jzcP22fioIVSdTmjgoEvmAO/QPw6TcS9k5FrNgn4w7q5lGOd1CT5g==", "integrity": "sha512-uKXqKN9beGoMdBfcaTY1ecwz6ctxuJAcUlwE55938g0ZJ8lRxwAZqRz2AJ4pzpt5dHdTPMB863UZ0ESiFUcP7A==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"undici-types": "~6.20.0" "undici-types": "~6.21.0"
} }
}, },
"node_modules/@types/node-forge": { "node_modules/@types/node-forge": {
@ -5480,10 +5474,11 @@
"dev": true "dev": true
}, },
"node_modules/@types/react": { "node_modules/@types/react": {
"version": "17.0.80", "version": "17.0.85",
"resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.80.tgz", "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.85.tgz",
"integrity": "sha512-LrgHIu2lEtIo8M7d1FcI3BdwXWoRQwMoXOZ7+dPTW0lYREjmlHl3P0U1VD0i/9tppOuv8/sam7sOjx34TxSFbA==", "integrity": "sha512-5oBDUsRDsrYq4DdyHaL99gE1AJCfuDhyxqF6/55fvvOIRkp1PpKuwJ+aMiGJR+GJt7YqMNclPROTHF20vY2cXA==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": { "dependencies": {
"@types/prop-types": "*", "@types/prop-types": "*",
"@types/scheduler": "^0.16", "@types/scheduler": "^0.16",
@ -5500,12 +5495,13 @@
} }
}, },
"node_modules/@types/react-dom": { "node_modules/@types/react-dom": {
"version": "17.0.25", "version": "17.0.26",
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.25.tgz", "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.26.tgz",
"integrity": "sha512-urx7A7UxkZQmThYA4So0NelOVjx3V4rNFVJwp0WZlbIK5eM4rNJDiN3R/E9ix0MBh6kAEojk/9YL+Te6D9zHNA==", "integrity": "sha512-Z+2VcYXJwOqQ79HreLU/1fyQ88eXSSFh6I3JdrEHQIfYSI0kCQpTGvOrbE6jFGGYXKsHuwY9tBa/w5Uo6KzrEg==",
"dev": true, "dev": true,
"dependencies": { "license": "MIT",
"@types/react": "^17" "peerDependencies": {
"@types/react": "^17.0.0"
} }
}, },
"node_modules/@types/react-router": { "node_modules/@types/react-router": {
@ -5545,9 +5541,9 @@
"dev": true "dev": true
}, },
"node_modules/@types/sanitize-html": { "node_modules/@types/sanitize-html": {
"version": "2.13.0", "version": "2.15.0",
"resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.13.0.tgz", "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.15.0.tgz",
"integrity": "sha512-X31WxbvW9TjIhZZNyNBZ/p5ax4ti7qsNDBDEnH4zAgmEh35YnFD1UiS6z9Cd34kKm0LslFW0KPmTQzu/oGtsqQ==", "integrity": "sha512-71Z6PbYsVKfp4i6Jvr37s5ql6if1Q/iJQT80NbaSi7uGaG8CqBMXP0pk/EsURAOuGdk5IJCd/vnzKrR7S3Txsw==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -9364,9 +9360,9 @@
} }
}, },
"node_modules/eslint-config-prettier": { "node_modules/eslint-config-prettier": {
"version": "10.1.1", "version": "10.1.2",
"resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-10.1.1.tgz", "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-10.1.2.tgz",
"integrity": "sha512-4EQQr6wXwS+ZJSzaR5ZCrYgLxqvUjdXctaEtBqHcbkW944B1NQyO4qpdHQbXBONfwxXdkAY81HH4+LUfrg+zPw==", "integrity": "sha512-Epgp/EofAUeEpIdZkW60MHKvPyru1ruQJxPL+WIycnaPApuseK0Zpkrh/FwL9oIpQvIhJwV7ptOy0DWUjTlCiA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"bin": { "bin": {
@ -9584,14 +9580,14 @@
} }
}, },
"node_modules/eslint-plugin-prettier": { "node_modules/eslint-plugin-prettier": {
"version": "5.2.3", "version": "5.2.6",
"resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.3.tgz", "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.6.tgz",
"integrity": "sha512-qJ+y0FfCp/mQYQ/vWQ3s7eUlFEL4PyKfAJxsnYTJ4YT73nsJBWqmEpFryxV9OeUiqmsTsYJ5Y+KDNaeP31wrRw==", "integrity": "sha512-mUcf7QG2Tjk7H055Jk0lGBjbgDnfrvqjhXh9t2xLMSCjZVcw9Rb1V6sVNXO0th3jgeO7zllWPTNRil3JW94TnQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"prettier-linter-helpers": "^1.0.0", "prettier-linter-helpers": "^1.0.0",
"synckit": "^0.9.1" "synckit": "^0.11.0"
}, },
"engines": { "engines": {
"node": "^14.18.0 || >=16.0.0" "node": "^14.18.0 || >=16.0.0"
@ -9602,7 +9598,7 @@
"peerDependencies": { "peerDependencies": {
"@types/eslint": ">=8.0.0", "@types/eslint": ">=8.0.0",
"eslint": ">=8.0.0", "eslint": ">=8.0.0",
"eslint-config-prettier": "*", "eslint-config-prettier": ">= 7.0.0 <10.0.0 || >=10.1.0",
"prettier": ">=3.0.0" "prettier": ">=3.0.0"
}, },
"peerDependenciesMeta": { "peerDependenciesMeta": {
@ -11416,9 +11412,9 @@
} }
}, },
"node_modules/http-proxy-middleware": { "node_modules/http-proxy-middleware": {
"version": "3.0.3", "version": "3.0.5",
"resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-3.0.3.tgz", "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-3.0.5.tgz",
"integrity": "sha512-usY0HG5nyDUwtqpiZdETNbmKtw3QQ1jwYFZ9wi5iHzX2BcILwQKtYDJPo7XHTsu5Z0B2Hj3W9NNnbd+AjFWjqg==", "integrity": "sha512-GLZZm1X38BPY4lkXA01jhwxvDoOkkXqjgVyUzVxiEK4iuRu03PZoYHhHRwxnfhQMDuaxi3vVri0YgSro/1oWqg==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@types/http-proxy": "^1.17.15", "@types/http-proxy": "^1.17.15",
@ -16382,9 +16378,9 @@
} }
}, },
"node_modules/moment-timezone": { "node_modules/moment-timezone": {
"version": "0.5.47", "version": "0.5.48",
"resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.47.tgz", "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.48.tgz",
"integrity": "sha512-UbNt/JAWS0m/NJOebR0QMRHBk0hu03r5dx9GK8Cs0AS3I81yDcOc9k+DytPItgVvBP7J6Mf6U2n3BPAacAV9oA==", "integrity": "sha512-f22b8LV1gbTO2ms2j2z13MuPogNoh5UzxL3nzNAYKGraILnbGc9NEE6dyiiiLv46DGRb8A4kg8UKWLjPthxBHw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"moment": "^2.29.4" "moment": "^2.29.4"
@ -21212,9 +21208,9 @@
"dev": true "dev": true
}, },
"node_modules/sanitize-html": { "node_modules/sanitize-html": {
"version": "2.15.0", "version": "2.16.0",
"resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.15.0.tgz", "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.16.0.tgz",
"integrity": "sha512-wIjst57vJGpLyBP8ioUbg6ThwJie5SuSIjHxJg53v5Fg+kUK+AXlb7bK3RNXpp315MvwM+0OBGCV6h5pPHsVhA==", "integrity": "sha512-0s4caLuHHaZFVxFTG74oW91+j6vW7gKbGD6CD2+miP73CE6z6YtOBN0ArtLd2UGyi4IC7K47v3ENUbQX4jV3Mg==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"deepmerge": "^4.2.2", "deepmerge": "^4.2.2",
@ -21232,9 +21228,9 @@
"dev": true "dev": true
}, },
"node_modules/sass": { "node_modules/sass": {
"version": "1.86.0", "version": "1.87.0",
"resolved": "https://registry.npmjs.org/sass/-/sass-1.86.0.tgz", "resolved": "https://registry.npmjs.org/sass/-/sass-1.87.0.tgz",
"integrity": "sha512-zV8vGUld/+mP4KbMLJMX7TyGCuUp7hnkOScgCMsWuHtns8CWBoz+vmEhoGMXsaJrbUP8gj+F1dLvVe79sK8UdA==", "integrity": "sha512-d0NoFH4v6SjEK7BoX810Jsrhj7IQSYHAHLi/iSpgqKc7LaIDshFRlSg5LOymf9FqQhxEHs2W5ZQXlvy0KD45Uw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"chokidar": "^4.0.0", "chokidar": "^4.0.0",
@ -21635,9 +21631,9 @@
"dev": true "dev": true
}, },
"node_modules/sinon": { "node_modules/sinon": {
"version": "19.0.4", "version": "19.0.5",
"resolved": "https://registry.npmjs.org/sinon/-/sinon-19.0.4.tgz", "resolved": "https://registry.npmjs.org/sinon/-/sinon-19.0.5.tgz",
"integrity": "sha512-myidFob7fjmYHJb+CHNLtAYScxn3sngGq4t75L2rCGGpE/k4OQVkN3KE5FsN+XkO2+fcDZ65PGvq3KHrlLAm7g==", "integrity": "sha512-r15s9/s+ub/d4bxNXqIUmwp6imVSdTorIRaxoecYjqTVLZ8RuoXr/4EDGwIBo6Waxn7f2gnURX9zuhAfCwaF6Q==",
"dev": true, "dev": true,
"license": "BSD-3-Clause", "license": "BSD-3-Clause",
"dependencies": { "dependencies": {
@ -22411,20 +22407,20 @@
"dev": true "dev": true
}, },
"node_modules/synckit": { "node_modules/synckit": {
"version": "0.9.2", "version": "0.11.4",
"resolved": "https://registry.npmjs.org/synckit/-/synckit-0.9.2.tgz", "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.11.4.tgz",
"integrity": "sha512-vrozgXDQwYO72vHjUb/HnFbQx1exDjoKzqx23aXEg2a9VIg2TSFZ8FmeZpTjUCFMYw7mpX4BE2SFu8wI7asYsw==", "integrity": "sha512-Q/XQKRaJiLiFIBNN+mndW7S/RHxvwzuZS6ZwmRzUBqJBv/5QIKCEwkBC8GBf8EQJKYnaFs0wOZbKTXBPj8L9oQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@pkgr/core": "^0.1.0", "@pkgr/core": "^0.2.3",
"tslib": "^2.6.2" "tslib": "^2.8.1"
}, },
"engines": { "engines": {
"node": "^14.18.0 || >=16.0.0" "node": "^14.18.0 || >=16.0.0"
}, },
"funding": { "funding": {
"url": "https://opencollective.com/unts" "url": "https://opencollective.com/synckit"
} }
}, },
"node_modules/tailwindcss": { "node_modules/tailwindcss": {
@ -22712,15 +22708,6 @@
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
"dev": true "dev": true
}, },
"node_modules/to-fast-properties": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
"integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/to-regex-range": { "node_modules/to-regex-range": {
"version": "5.0.1", "version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@ -22789,9 +22776,9 @@
"dev": true "dev": true
}, },
"node_modules/ts-jest": { "node_modules/ts-jest": {
"version": "29.2.6", "version": "29.3.2",
"resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.6.tgz", "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.3.2.tgz",
"integrity": "sha512-yTNZVZqc8lSixm+QGVFcPe6+yj7+TWZwIesuOWvfcn4B9bz5x4NDzVCQQjOs7Hfouu36aEqfEbo9Qpo+gq8dDg==", "integrity": "sha512-bJJkrWc6PjFVz5g2DGCNUo8z7oFEYaz1xP1NpeDU7KNLMWPpEyV8Chbpkn8xjzgRDpQhnGMyvyldoL7h8JXyug==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -22803,6 +22790,7 @@
"lodash.memoize": "^4.1.2", "lodash.memoize": "^4.1.2",
"make-error": "^1.3.6", "make-error": "^1.3.6",
"semver": "^7.7.1", "semver": "^7.7.1",
"type-fest": "^4.39.1",
"yargs-parser": "^21.1.1" "yargs-parser": "^21.1.1"
}, },
"bin": { "bin": {
@ -22903,6 +22891,19 @@
"node": "^14.15.0 || ^16.10.0 || >=18.0.0" "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
} }
}, },
"node_modules/ts-jest/node_modules/type-fest": {
"version": "4.40.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.40.1.tgz",
"integrity": "sha512-9YvLNnORDpI+vghLU/Nf+zSv0kL47KbVJ1o3sKgoTefl6i+zebxbiDQWoe/oWWqPhIgQdRZRT1KA9sCPL810SA==",
"dev": true,
"license": "(MIT OR CC0-1.0)",
"engines": {
"node": ">=16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/tsconfig-paths": { "node_modules/tsconfig-paths": {
"version": "3.15.0", "version": "3.15.0",
"resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz",
@ -22937,9 +22938,10 @@
} }
}, },
"node_modules/tslib": { "node_modules/tslib": {
"version": "2.6.2", "version": "2.8.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
"integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
"license": "0BSD"
}, },
"node_modules/tsutils": { "node_modules/tsutils": {
"version": "3.21.0", "version": "3.21.0",
@ -23131,9 +23133,9 @@
"dev": true "dev": true
}, },
"node_modules/undici-types": { "node_modules/undici-types": {
"version": "6.20.0", "version": "6.21.0",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
"integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/unicode-canonical-property-names-ecmascript": { "node_modules/unicode-canonical-property-names-ecmascript": {
@ -23627,10 +23629,11 @@
} }
}, },
"node_modules/webpack-dev-server/node_modules/http-proxy-middleware": { "node_modules/webpack-dev-server/node_modules/http-proxy-middleware": {
"version": "2.0.7", "version": "2.0.9",
"resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz", "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz",
"integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==", "integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": { "dependencies": {
"@types/http-proxy": "^1.17.8", "@types/http-proxy": "^1.17.8",
"http-proxy": "^1.18.1", "http-proxy": "^1.18.1",


@ -4,12 +4,12 @@
"private": true, "private": true,
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.18.6", "@codemirror/autocomplete": "^6.18.6",
"@codemirror/commands": "^6.8.0", "@codemirror/commands": "^6.8.1",
"@codemirror/language": "^6.11.0", "@codemirror/language": "^6.11.0",
"@codemirror/lint": "^6.8.4", "@codemirror/lint": "^6.8.5",
"@codemirror/search": "^6.5.10", "@codemirror/search": "^6.5.10",
"@codemirror/state": "^6.5.2", "@codemirror/state": "^6.5.2",
"@codemirror/view": "^6.36.4", "@codemirror/view": "^6.36.6",
"@forevolve/bootstrap-dark": "^4.0.2", "@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.7.2", "@fortawesome/fontawesome-svg-core": "6.7.2",
"@fortawesome/free-solid-svg-icons": "6.7.2", "@fortawesome/free-solid-svg-icons": "6.7.2",
@ -19,16 +19,16 @@
"@lezer/lr": "^1.4.2", "@lezer/lr": "^1.4.2",
"@nexucis/fuzzy": "^0.5.1", "@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1", "@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.302.1", "@prometheus-io/codemirror-promql": "0.303.0",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^9.0.9", "downshift": "^9.0.9",
"http-proxy-middleware": "^3.0.3", "http-proxy-middleware": "^3.0.5",
"jquery": "^3.7.1", "jquery": "^3.7.1",
"jquery.flot.tooltip": "^0.9.0", "jquery.flot.tooltip": "^0.9.0",
"moment": "^2.30.1", "moment": "^2.30.1",
"moment-timezone": "^0.5.47", "moment-timezone": "^0.5.48",
"popper.js": "^1.14.3", "popper.js": "^1.16.1",
"react": "^17.0.2", "react": "^17.0.2",
"react-copy-to-clipboard": "^5.1.0", "react-copy-to-clipboard": "^5.1.0",
"react-dom": "^17.0.2", "react-dom": "^17.0.2",
@ -37,8 +37,8 @@
"react-router-dom": "^5.3.4", "react-router-dom": "^5.3.4",
"react-test-renderer": "^17.0.2", "react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1", "reactstrap": "^8.10.1",
"sanitize-html": "^2.15.0", "sanitize-html": "^2.16.0",
"sass": "1.86.0", "sass": "1.87.0",
"tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3" "tempusdominus-core": "^5.19.3"
}, },
@ -71,26 +71,26 @@
"@types/flot": "0.0.36", "@types/flot": "0.0.36",
"@types/jest": "^29.5.14", "@types/jest": "^29.5.14",
"@types/jquery": "^3.5.32", "@types/jquery": "^3.5.32",
"@types/node": "^22.13.11", "@types/node": "^22.15.2",
"@types/react": "^17.0.71", "@types/react": "^17.0.85",
"@types/react-copy-to-clipboard": "^5.0.7", "@types/react-copy-to-clipboard": "^5.0.7",
"@types/react-dom": "^17.0.25", "@types/react-dom": "^17.0.26",
"@types/react-router-dom": "^5.3.3", "@types/react-router-dom": "^5.3.3",
"@types/sanitize-html": "^2.13.0", "@types/sanitize-html": "^2.15.0",
"@types/sinon": "^17.0.4", "@types/sinon": "^17.0.4",
"@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0",
"enzyme": "^3.11.0", "enzyme": "^3.11.0",
"enzyme-to-json": "^3.6.2", "enzyme-to-json": "^3.6.2",
"eslint-config-prettier": "^10.1.1", "eslint-config-prettier": "^10.1.2",
"eslint-config-react-app": "^7.0.1", "eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^5.2.3", "eslint-plugin-prettier": "^5.2.6",
"jest-canvas-mock": "^2.5.2", "jest-canvas-mock": "^2.5.2",
"jest-fetch-mock": "^3.0.3", "jest-fetch-mock": "^3.0.3",
"mutationobserver-shim": "^0.3.7", "mutationobserver-shim": "^0.3.7",
"prettier": "^3.5.3", "prettier": "^3.5.3",
"react-scripts": "^5.0.1", "react-scripts": "^5.0.1",
"sinon": "^19.0.4", "sinon": "^19.0.5",
"ts-jest": "^29.2.6" "ts-jest": "^29.3.2"
}, },
"jest": { "jest": {
"snapshotSerializers": [ "snapshotSerializers": [


@ -290,6 +290,7 @@ type Options struct {
EnableRemoteWriteReceiver bool EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool EnableOTLPWriteReceiver bool
ConvertOTLPDelta bool ConvertOTLPDelta bool
NativeOTLPDeltaIngestion bool
IsAgent bool IsAgent bool
CTZeroIngestionEnabled bool CTZeroIngestionEnabled bool
AppName string AppName string
@ -389,6 +390,7 @@ func New(logger *slog.Logger, o *Options) *Handler {
o.AcceptRemoteWriteProtoMsgs, o.AcceptRemoteWriteProtoMsgs,
o.EnableOTLPWriteReceiver, o.EnableOTLPWriteReceiver,
o.ConvertOTLPDelta, o.ConvertOTLPDelta,
o.NativeOTLPDeltaIngestion,
o.CTZeroIngestionEnabled, o.CTZeroIngestionEnabled,
) )
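A hedged sketch of the corresponding server-side switch: the Options field names and the web.New signature appear in the hunk above, while the surrounding setup (the logger, leaving the remaining Options fields at their zero values) is illustrative only and would need the rest of the configuration in a real server.

package example

import (
	"log/slog"
	"os"

	"github.com/prometheus/prometheus/web"
)

// buildHandler shows how a caller might surface the new flag through the
// web handler options after this change.
func buildHandler() *web.Handler {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	opts := &web.Options{
		EnableOTLPWriteReceiver:  true,  // expose the OTLP write endpoint
		ConvertOTLPDelta:         false, // do not rewrite deltas to cumulative
		NativeOTLPDeltaIngestion: true,  // new: keep OTLP delta samples as deltas
	}
	return web.New(logger, opts)
}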