diff --git a/.github/workflows/automerge-dependabot.yml b/.github/workflows/automerge-dependabot.yml
index 3909f57329..616e4ee8b6 100644
--- a/.github/workflows/automerge-dependabot.yml
+++ b/.github/workflows/automerge-dependabot.yml
@@ -19,7 +19,7 @@ jobs:
steps:
- name: Dependabot metadata
id: metadata
- uses: dependabot/fetch-metadata@d7267f607e9d3fb96fc2fbe83e0af444713e90b7 # v2.3.0
+ uses: dependabot/fetch-metadata@08eff52bf64351f401fb50d4972fa95b9f2c2d1b # v2.4.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
- name: Enable auto-merge for Dependabot PRs
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ea10fd0091..ce5a290a16 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -95,7 +95,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+ - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.24.x
- run: |
@@ -205,7 +205,7 @@ jobs:
with:
persist-credentials: false
- name: Install Go
- uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+ uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
cache: false
go-version: 1.24.x
@@ -220,18 +220,18 @@ jobs:
with:
persist-credentials: false
- name: Install Go
- uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+ uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.24.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
- uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0
+ uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
- version: v2.1.5
+ version: v2.2.1
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index b444815d3c..370e7537e2 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -29,12 +29,12 @@ jobs:
persist-credentials: false
- name: Initialize CodeQL
- uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+ uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
with:
languages: ${{ matrix.language }}
- name: Autobuild
- uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+ uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+ uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index c2335a8e46..89386efd7a 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -26,7 +26,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # tag=v2.4.1
+ uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # tag=v2.4.2
with:
results_file: results.sarif
results_format: sarif
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # tag=v3.28.16
+ uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # tag=v3.29.2
with:
sarif_file: results.sarif
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c4a906d09b..adb6b0fe0f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,8 @@
## main / unreleased
+* [BUGFIX] OTLP receiver: Generate `target_info` samples between the earliest and latest samples per resource. #16737
+
## 3.5.0 / 2025-07-14
* [FEATURE] PromQL: Add experimental type and unit metadata labels, behind feature flag `type-and-unit-labels`. #16228 #16632 #16718 #16743
@@ -246,6 +248,11 @@ This release includes new features such as a brand new UI and UTF-8 support enab
* [BUGFIX] Autoreload: Reload invalid yaml files. #14947
* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
+## 2.53.5 / 2025-06-30
+
+* [ENHANCEMENT] TSDB: Add backward compatibility with the upcoming TSDB block index v3. #16762
+* [BUGFIX] Top-level: Update GOGC before loading TSDB. #16521
+
## 2.53.4 / 2025-03-18
* [BUGFIX] Runtime: fix GOGC is being set to 0 when installed with empty prometheus.yml file resulting high cpu usage. #16090
@@ -260,7 +267,7 @@ This release includes new features such as a brand new UI and UTF-8 support enab
Fix a bug where Prometheus would crash with a segmentation fault if a remote-read
request accessed a block on disk at about the same time as TSDB created a new block.
-[BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515,#14523
+* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515,#14523
## 2.55.1 / 2024-11-04
diff --git a/Makefile b/Makefile
index 0b5935de00..beff8c4a80 100644
--- a/Makefile
+++ b/Makefile
@@ -189,6 +189,6 @@ update-all-go-deps:
@$(MAKE) update-go-deps
@echo ">> updating Go dependencies in ./documentation/examples/remote_storage/"
@cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
- $(GO) get -d $$m; \
+ $(GO) get $$m; \
done
@cd ./documentation/examples/remote_storage/ && $(GO) mod tidy
diff --git a/Makefile.common b/Makefile.common
index 4de21512ff..1f4c9025a5 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v2.1.5
+GOLANGCI_LINT_VERSION ?= v2.2.1
GOLANGCI_FMT_OPTS ?=
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
@@ -139,7 +139,7 @@ common-deps:
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
- $(GO) get -d $$m; \
+ $(GO) get $$m; \
done
$(GO) mod tidy
diff --git a/README.md b/README.md
index 26262734c0..1743c5a4b8 100644
--- a/README.md
+++ b/README.md
@@ -67,9 +67,9 @@ Prometheus will now be reachable at <http://localhost:9090/>.
To build Prometheus from source code, You need:
-* Go [version 1.22 or greater](https://golang.org/doc/install).
-* NodeJS [version 22 or greater](https://nodejs.org/).
-* npm [version 8 or greater](https://www.npmjs.com/).
+* Go: Version specified in [go.mod](./go.mod) or greater.
+* NodeJS: Version specified in [.nvmrc](./web/ui/.nvmrc) or greater.
+* npm: Version 8 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).
Start by cloning the repository:
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index c6a5801d28..7b5b6a603a 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -89,6 +89,8 @@ var (
lintConfigOptions = append(append([]string{}, lintRulesOptions...), lintOptionTooLongScrapeInterval)
)
+const httpConfigFileDescription = "HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool"
+
func main() {
var (
httpRoundTripper = api.DefaultRoundTripper
@@ -138,11 +140,11 @@ func main() {
).Required().ExistingFiles()
checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.")
- checkServerHealthCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ checkServerHealthCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
checkServerHealthCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.")
- checkServerReadyCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ checkServerReadyCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
checkServerReadyCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.")
@@ -165,7 +167,7 @@ func main() {
queryCmd := app.Command("query", "Run query against a Prometheus server.")
queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")
- queryCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ queryCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
queryInstantCmd := queryCmd.Command("instant", "Run instant query.")
queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
@@ -210,7 +212,7 @@ func main() {
queryAnalyzeCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().StringsVar(&queryAnalyzeCfg.matchers)
pushCmd := app.Command("push", "Push to a Prometheus server.")
- pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ pushCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL)
metricFiles := pushMetricsCmd.Arg(
@@ -277,7 +279,7 @@ func main() {
importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String()
importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String()
importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.")
- importRulesCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ importRulesCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URLVar(&serverURL)
importRulesStart := importRulesCmd.Flag("start", "The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required.").
Required().String()
diff --git a/config/config.go b/config/config.go
index a60cfcb6b9..137a292145 100644
--- a/config/config.go
+++ b/config/config.go
@@ -68,11 +68,6 @@ var (
}
)
-const (
- LegacyValidationConfig = "legacy"
- UTF8ValidationConfig = "utf8"
-)
-
// Load parses the YAML input s into a Config.
func Load(s string, logger *slog.Logger) (*Config, error) {
cfg := &Config{}
@@ -109,10 +104,10 @@ func Load(s string, logger *slog.Logger) (*Config, error) {
}
switch cfg.OTLPConfig.TranslationStrategy {
- case UnderscoreEscapingWithSuffixes:
+ case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
case "":
case NoTranslation, NoUTF8EscapingWithSuffixes:
- if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
+ if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation {
return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy)
}
default:
@@ -172,7 +167,7 @@ var (
ScrapeProtocols: DefaultScrapeProtocols,
ConvertClassicHistogramsToNHCB: false,
AlwaysScrapeClassicHistograms: false,
- MetricNameValidationScheme: UTF8ValidationConfig,
+ MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
}
@@ -486,8 +481,8 @@ type GlobalConfig struct {
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
// Allow UTF8 Metric and Label Names. Can be blank in config files but must
- // have a value if a ScrapeConfig is created programmatically.
- MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
+ // have a value if a GlobalConfig is created programmatically.
+ MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
// Metric name escaping mode to request through content negotiation. Can be
// blank in config files but must have a value if a ScrapeConfig is created
// programmatically.
@@ -755,7 +750,7 @@ type ScrapeConfig struct {
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
// Allow UTF8 Metric and Label Names. Can be blank in config files but must
// have a value if a ScrapeConfig is created programmatically.
- MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
+ MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
// Metric name escaping mode to request through content negotiation. Can be
// blank in config files but must have a value if a ScrapeConfig is created
// programmatically.
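For orientation, the field keeps its `metric_name_validation_scheme` yaml tag shown above, so the configuration surface is unchanged; only the Go type moves from string to model.ValidationScheme. A rough sketch of how the setting is expressed in configuration (the job name is hypothetical; the accepted values `legacy`/`utf8` come from the Validate error messages below):

```yaml
global:
  # Default validation scheme inherited by all scrape configs.
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: example                    # hypothetical job
    # Overrides the global setting for this job.
    metric_name_validation_scheme: utf8
```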
@@ -882,32 +877,32 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
}
switch globalConfig.MetricNameValidationScheme {
- case "":
- globalConfig.MetricNameValidationScheme = UTF8ValidationConfig
- case LegacyValidationConfig, UTF8ValidationConfig:
+ case model.UnsetValidation:
+ globalConfig.MetricNameValidationScheme = model.UTF8Validation
+ case model.LegacyValidation, model.UTF8Validation:
default:
- return fmt.Errorf("unknown global name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
+ return fmt.Errorf("unknown global name validation method specified, must be either '', 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
}
// Scrapeconfig validation scheme matches global if left blank.
switch c.MetricNameValidationScheme {
- case "":
+ case model.UnsetValidation:
c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
- case LegacyValidationConfig, UTF8ValidationConfig:
+ case model.LegacyValidation, model.UTF8Validation:
default:
- return fmt.Errorf("unknown scrape config name validation method specified, must be either 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme)
+ return fmt.Errorf("unknown scrape config name validation method specified, must be either '', 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme)
}
// Escaping scheme is based on the validation scheme if left blank.
switch globalConfig.MetricNameEscapingScheme {
case "":
- if globalConfig.MetricNameValidationScheme == LegacyValidationConfig {
+ if globalConfig.MetricNameValidationScheme == model.LegacyValidation {
globalConfig.MetricNameEscapingScheme = model.EscapeUnderscores
} else {
globalConfig.MetricNameEscapingScheme = model.AllowUTF8
}
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
default:
- return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameValidationScheme)
+ return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameEscapingScheme)
}
if c.MetricNameEscapingScheme == "" {
@@ -916,12 +911,12 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
switch c.MetricNameEscapingScheme {
case model.AllowUTF8:
- if c.MetricNameValidationScheme != UTF8ValidationConfig {
+ if c.MetricNameValidationScheme != model.UTF8Validation {
return errors.New("utf8 metric names requested but validation scheme is not set to UTF8")
}
case model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
default:
- return fmt.Errorf("unknown scrape config name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, c.MetricNameValidationScheme)
+ return fmt.Errorf("unknown scrape config name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, c.MetricNameEscapingScheme)
}
if c.ConvertClassicHistogramsToNHCB == nil {
@@ -942,26 +937,6 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
-// ToValidationScheme returns the validation scheme for the given string config value.
-func ToValidationScheme(s string) (validationScheme model.ValidationScheme, err error) {
- switch s {
- case "":
- // This is a workaround for third party exporters that don't set the validation scheme.
- if DefaultGlobalConfig.MetricNameValidationScheme == "" {
- return model.UTF8Validation, errors.New("global metric name validation scheme is not set")
- }
- return ToValidationScheme(DefaultGlobalConfig.MetricNameValidationScheme)
- case UTF8ValidationConfig:
- validationScheme = model.UTF8Validation
- case LegacyValidationConfig:
- validationScheme = model.LegacyValidation
- default:
- return model.UTF8Validation, fmt.Errorf("invalid metric name validation scheme, %s", s)
- }
-
- return validationScheme, nil
-}
-
// ToEscapingScheme wraps the equivalent common library function with the
// desired default behavior based on the given validation scheme. This is a
// workaround for third party exporters that don't set the escaping scheme.
@@ -972,6 +947,10 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme,
return model.NoEscaping, nil
case model.LegacyValidation:
return model.UnderscoreEscaping, nil
+ case model.UnsetValidation:
+ return model.NoEscaping, fmt.Errorf("v is unset: %s", v)
+ default:
+ panic(fmt.Errorf("unhandled validation scheme: %s", v))
}
}
return model.ToEscapingScheme(s)
@@ -1555,31 +1534,68 @@ func getGoGC() int {
type translationStrategyOption string
var (
- // NoUTF8EscapingWithSuffixes will accept metric/label names as they are.
- // Unit and type suffixes may be added to metric names, according to certain rules.
+ // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit
+ // and type suffixes may be added to metric names, according to certain rules.
NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
- // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
- // This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores,
- // and label name characters that are not alphanumerics/underscores to underscores.
- // Unit and type suffixes may be appended to metric names, according to certain rules.
+ // UnderscoreEscapingWithSuffixes is the default option for translating OTLP
+ // to Prometheus. This option will translate metric name characters that are
+ // not alphanumerics/underscores/colons to underscores, and label name
+ // characters that are not alphanumerics/underscores to underscores. Unit and
+ // type suffixes may be appended to metric names, according to certain rules.
UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
+ // UnderscoreEscapingWithoutSuffixes translates metric name characters that
+ // are not alphanumerics/underscores/colons to underscores, and label name
+ // characters that are not alphanumerics/underscores to underscores, but
+ // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
+ // the names.
+ UnderscoreEscapingWithoutSuffixes translationStrategyOption = "UnderscoreEscapingWithoutSuffixes"
// NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
- // and label names. This offers a way for the OTLP users to use native metric names, reducing confusion.
+ // and label names. This offers a way for the OTLP users to use native metric
+ // names, reducing confusion.
//
// WARNING: This setting has significant known risks and limitations (see
- // https://prometheus.io/docs/practices/naming/ for details):
- // * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration).
- // * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed
- // time series. For instance, you may end up in situation of ingesting `foo.bar` series with unit
- // `seconds` and a separate series `foo.bar` with unit `milliseconds`.
+ // https://prometheus.io/docs/practices/naming/ for details): * Impaired UX
+ // when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling
+ // configuration). * Series collisions which in the best case may result in
+ // OOO errors, in the worst case a silently malformed time series. For
+ // instance, you may end up in situation of ingesting `foo.bar` series with
+ // unit `seconds` and a separate series `foo.bar` with unit `milliseconds`.
//
- // As a result, this setting is experimental and currently, should not be used in
- // production systems.
+ // As a result, this setting is experimental and currently, should not be used
+ // in production systems.
//
- // TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks.
+ // TODO(ArthurSens): Mention `type-and-unit-labels` feature
+ // (https://github.com/prometheus/proposals/pull/39) once released, as
+ // potential mitigation of the above risks.
NoTranslation translationStrategyOption = "NoTranslation"
)
+// ShouldEscape returns true if the translation strategy requires that metric
+// names be escaped.
+func (o translationStrategyOption) ShouldEscape() bool {
+ switch o {
+ case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
+ return true
+ case NoTranslation, NoUTF8EscapingWithSuffixes:
+ return false
+ default:
+ return false
+ }
+}
+
+// ShouldAddSuffixes returns a bool deciding whether the given translation
+// strategy should have suffixes added.
+func (o translationStrategyOption) ShouldAddSuffixes() bool {
+ switch o {
+ case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes:
+ return true
+ case UnderscoreEscapingWithoutSuffixes, NoTranslation:
+ return false
+ default:
+ return false
+ }
+}
+
// OTLPConfig is the configuration for writing to the OTLP endpoint.
type OTLPConfig struct {
PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"`
diff --git a/config/config_test.go b/config/config_test.go
index 1e3a04fb93..2360a8b48f 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -2567,7 +2567,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeInterval: opts.ScrapeInterval,
ScrapeTimeout: opts.ScrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
- MetricNameValidationScheme: UTF8ValidationConfig,
+ MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
MetricsPath: "/metrics",
@@ -2627,7 +2627,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
- MetricNameValidationScheme: UTF8ValidationConfig,
+ MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@@ -2664,7 +2664,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
- MetricNameValidationScheme: UTF8ValidationConfig,
+ MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@@ -2788,27 +2788,27 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
tests := []struct {
name string
inputFile string
- expectScheme string
+ expectScheme model.ValidationScheme
}{
{
name: "blank config implies default",
inputFile: "scrape_config_default_validation_mode",
- expectScheme: "utf8",
+ expectScheme: model.UTF8Validation,
},
{
name: "global setting implies local settings",
inputFile: "scrape_config_global_validation_mode",
- expectScheme: "legacy",
+ expectScheme: model.LegacyValidation,
},
{
name: "local setting",
inputFile: "scrape_config_local_validation_mode",
- expectScheme: "legacy",
+ expectScheme: model.LegacyValidation,
},
{
name: "local setting overrides global setting",
inputFile: "scrape_config_local_global_validation_mode",
- expectScheme: "utf8",
+ expectScheme: model.UTF8Validation,
},
}
@@ -2832,31 +2832,31 @@ func TestScrapeConfigNameEscapingSettings(t *testing.T) {
tests := []struct {
name string
inputFile string
- expectValidationScheme string
+ expectValidationScheme model.ValidationScheme
expectEscapingScheme string
}{
{
name: "blank config implies default",
inputFile: "scrape_config_default_validation_mode",
- expectValidationScheme: "utf8",
+ expectValidationScheme: model.UTF8Validation,
expectEscapingScheme: "allow-utf-8",
},
{
name: "global setting implies local settings",
inputFile: "scrape_config_global_validation_mode",
- expectValidationScheme: "legacy",
+ expectValidationScheme: model.LegacyValidation,
expectEscapingScheme: "dots",
},
{
name: "local setting",
inputFile: "scrape_config_local_validation_mode",
- expectValidationScheme: "legacy",
+ expectValidationScheme: model.LegacyValidation,
expectEscapingScheme: "values",
},
{
name: "local setting overrides global setting",
inputFile: "scrape_config_local_global_validation_mode",
- expectValidationScheme: "utf8",
+ expectValidationScheme: model.UTF8Validation,
expectEscapingScheme: "dots",
},
}
diff --git a/discovery/manager.go b/discovery/manager.go
index 6e9bab1d7c..51a46ca231 100644
--- a/discovery/manager.go
+++ b/discovery/manager.go
@@ -57,6 +57,8 @@ func (p *Provider) Discoverer() Discoverer {
// IsStarted return true if Discoverer is started.
func (p *Provider) IsStarted() bool {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
return p.cancel != nil
}
@@ -216,15 +218,22 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
newProviders []*Provider
)
for _, prov := range m.providers {
- // Cancel obsolete providers.
- if len(prov.newSubs) == 0 {
+ // Cancel a provider if it is obsolete, i.e. it has no new subs and it has a cancel function.
+ // prov.cancel != nil is the same check as in the IsStarted() method, but we don't call IsStarted
+ // here because it would take a lock, and we need the same lock ourselves for other reads.
+ prov.mu.RLock()
+ if len(prov.newSubs) == 0 && prov.cancel != nil {
wg.Add(1)
prov.done = func() {
wg.Done()
}
+
prov.cancel()
+ prov.mu.RUnlock()
continue
}
+ prov.mu.RUnlock()
+
newProviders = append(newProviders, prov)
// refTargets keeps reference targets used to populate new subs' targets as they should be the same.
var refTargets map[string]*targetgroup.Group
@@ -298,7 +307,9 @@ func (m *Manager) startProvider(ctx context.Context, p *Provider) {
ctx, cancel := context.WithCancel(ctx)
updates := make(chan []*targetgroup.Group)
+ p.mu.Lock()
p.cancel = cancel
+ p.mu.Unlock()
go p.d.Run(ctx, updates)
go m.updater(ctx, p, updates)
@@ -306,16 +317,20 @@ func (m *Manager) startProvider(ctx context.Context, p *Provider) {
// cleaner cleans resources associated with provider.
func (m *Manager) cleaner(p *Provider) {
- p.mu.RLock()
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
m.targetsMtx.Lock()
for s := range p.subs {
delete(m.targets, poolKey{s, p.name})
}
m.targetsMtx.Unlock()
- p.mu.RUnlock()
if p.done != nil {
p.done()
}
+
+ // Provider was cleaned up, so mark it as down.
+ p.cancel = nil
}
func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targetgroup.Group) {
@@ -350,8 +365,10 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ
func (m *Manager) sender() {
ticker := time.NewTicker(m.updatert)
- defer ticker.Stop()
-
+ defer func() {
+ ticker.Stop()
+ close(m.syncCh)
+ }()
for {
select {
case <-m.ctx.Done():
@@ -380,9 +397,11 @@ func (m *Manager) cancelDiscoverers() {
m.mtx.RLock()
defer m.mtx.RUnlock()
for _, p := range m.providers {
+ p.mu.RLock()
if p.cancel != nil {
p.cancel()
}
+ p.mu.RUnlock()
}
}
@@ -491,19 +510,3 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
}
return failed
}
-
-// StaticProvider holds a list of target groups that never change.
-type StaticProvider struct {
- TargetGroups []*targetgroup.Group
-}
-
-// Run implements the Worker interface.
-func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
- // We still have to consider that the consumer exits right away in which case
- // the context will be canceled.
- select {
- case ch <- sd.TargetGroups:
- case <-ctx.Done():
- }
- close(ch)
-}
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index 1dd10baf47..38a93be9f4 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -1562,3 +1562,53 @@ func TestUnregisterMetrics(t *testing.T) {
cancel()
}
}
+
+// Calling ApplyConfig() to remove providers at the same time as shutting down
+// the manager should not hang.
+func TestConfigReloadAndShutdownRace(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ _, sdMetrics := NewTestMetrics(t, reg)
+
+ mgrCtx, mgrCancel := context.WithCancel(context.Background())
+ discoveryManager := NewManager(mgrCtx, promslog.NewNopLogger(), reg, sdMetrics)
+ require.NotNil(t, discoveryManager)
+ discoveryManager.updatert = 100 * time.Millisecond
+
+ var wgDiscovery sync.WaitGroup
+ wgDiscovery.Add(1)
+ go func() {
+ discoveryManager.Run()
+ wgDiscovery.Done()
+ }()
+ time.Sleep(time.Millisecond * 200)
+
+ var wgBg sync.WaitGroup
+ updateChan := discoveryManager.SyncCh()
+ wgBg.Add(1)
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ defer wgBg.Done()
+ select {
+ case <-ctx.Done():
+ return
+ case <-updateChan:
+ }
+ }()
+
+ c := map[string]Configs{
+ "prometheus": {staticConfig("bar:9090")},
+ }
+ discoveryManager.ApplyConfig(c)
+
+ delete(c, "prometheus")
+ wgBg.Add(1)
+ go func() {
+ discoveryManager.ApplyConfig(c)
+ wgBg.Done()
+ }()
+ mgrCancel()
+ wgDiscovery.Wait()
+
+ cancel()
+ wgBg.Wait()
+}
diff --git a/discovery/registry.go b/discovery/registry.go
index 93b88ccfab..92fa3d3d16 100644
--- a/discovery/registry.go
+++ b/discovery/registry.go
@@ -266,7 +266,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
err := rmm.Register()
if err != nil {
- return nil, errors.New("failed to create service discovery refresh metrics")
+ return nil, fmt.Errorf("failed to create service discovery refresh metrics: %w", err)
}
metrics := make(map[string]DiscovererMetrics)
@@ -274,7 +274,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
err = currentSdMetrics.Register()
if err != nil {
- return nil, errors.New("failed to create service discovery metrics")
+ return nil, fmt.Errorf("failed to create service discovery metrics: %w", err)
}
metrics[conf.Name()] = currentSdMetrics
}
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
index 92e0ac0030..ec9e5d62af 100644
--- a/docs/command-line/promtool.md
+++ b/docs/command-line/promtool.md
@@ -142,7 +142,7 @@ Check if the Prometheus server is healthy.
| Flag | Description | Default |
| --- | --- | --- |
-| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | |
+| --http.config.file | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
| --url | The URL for the Prometheus server. | `http://localhost:9090` |
@@ -158,7 +158,7 @@ Check if the Prometheus server is ready.
| Flag | Description | Default |
| --- | --- | --- |
-| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | |
+| --http.config.file | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
| --url | The URL for the Prometheus server. | `http://localhost:9090` |
@@ -213,7 +213,7 @@ Run query against a Prometheus server.
| Flag | Description | Default |
| --- | --- | --- |
| -o, --format | Output format of the query. | `promql` |
-| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | |
+| --http.config.file | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
@@ -404,7 +404,7 @@ Push to a Prometheus server.
| Flag | Description |
| --- | --- |
-| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. |
+| --http.config.file | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool |
@@ -672,7 +672,7 @@ Create blocks of data for new recording rules.
| Flag | Description | Default |
| --- | --- | --- |
-| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | |
+| --http.config.file | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
| --url | The URL for the Prometheus API with the data where the rule will be backfilled from. | `http://localhost:9090` |
| --start | The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required. | |
| --end | If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. Must be a RFC3339 formatted date or Unix timestamp. | |
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index afda28fb8c..17b40b8f4d 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -197,6 +197,11 @@ otlp:
# - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus.
# It preserves all special characters like dots, but still adds required metric name suffixes
# for units and _total, as UnderscoreEscapingWithSuffixes does.
+ # - "UnderscoreEscapingWithoutSuffixes" translates metric name characters that
+ # are not alphanumerics/underscores/colons to underscores, and label name
+ # characters that are not alphanumerics/underscores to underscores, but
+ # unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
+ # the names.
# - (EXPERIMENTAL) "NoTranslation" is a mode that relies on UTF-8 support in Prometheus.
# It preserves all special character like dots and won't append special suffixes for metric
# unit and type.
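As a quick illustration of the new option (a sketch only; the `otlp` block and `translation_strategy` key come from the surrounding section):

```yaml
otlp:
  # Escape unsupported characters to underscores, but do not append
  # unit/_total suffixes to metric names.
  translation_strategy: UnderscoreEscapingWithoutSuffixes
```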
@@ -257,7 +262,7 @@ job_name:
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
[ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]
-# Fallback protocol to use if a scrape returns blank, unparseable, or otherwise
+# Fallback protocol to use if a scrape returns blank, unparsable, or otherwise
# invalid Content-Type.
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
@@ -2283,7 +2288,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
See below for the configuration options for STACKIT discovery:
```yaml
-# The STACKIT project
+# The STACKIT project
project:
# STACKIT region to use. No automatic discovery of the region is done.
diff --git a/docs/configuration/promtool.md b/docs/configuration/promtool.md
new file mode 100644
index 0000000000..d127ad080c
--- /dev/null
+++ b/docs/configuration/promtool.md
@@ -0,0 +1,175 @@
+---
+title: HTTP configuration for promtool
+sort_rank: 6
+---
+
+Promtool is a versatile CLI tool for Prometheus that supports validation, debugging, querying, unit testing, tsdb management, pushing data, and experimental PromQL editing.
+
+Prometheus supports basic authentication and TLS. Since promtool needs to connect to Prometheus, the authentication details must be provided to it. Use the `--http.config.file` flag to specify them for all requests that communicate with Prometheus.
+For instance, if you would like to check whether your local Prometheus server is healthy, you would use:
+```bash
+promtool check healthy --url=http://localhost:9090 --http.config.file=http-config-file.yml
+```
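For illustration, a minimal `http-config-file.yml` consistent with the schema documented below could contain just basic auth credentials (the values here are placeholders, not the contents of the example file shipped in the repository):

```yaml
basic_auth:
  username: promtool-user                 # placeholder user name
  password_file: /etc/promtool/password   # placeholder path to a password file
```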
+
+The file is written in [YAML format](https://en.wikipedia.org/wiki/YAML), defined by the schema described below.
+Brackets indicate that a parameter is optional. For non-list parameters the value is set to the specified default.
+
+The file is re-read on every HTTP request, so any change to the
+configuration or the certificates is picked up immediately.
+
+Generic placeholders are defined as follows:
+
+* `<boolean>`: a boolean that can take the values `true` or `false`
+* `<filename>`: a valid path to a file
+* `<secret>`: a regular string that is a secret, such as a password
+* `<string>`: a regular string
+
+A valid example file can be found [here](/documentation/examples/promtool-http-config-file.yml).
+
+```yaml
+# Note that `basic_auth` and `authorization` options are mutually exclusive.
+
+# Sets the `Authorization` header with the configured username and password.
+# `username_ref` and `password_ref` refer to the name of the secret within the secret manager.
+# `password`, `password_file` and `password_ref` are mutually exclusive.
+basic_auth:
+ [ username: <string> ]
+ [ username_file: <filename> ]
+ [ username_ref: <string> ]
+ [ password: <secret> ]
+ [ password_file: <filename> ]
+ [ password_ref: <string> ]
+
+# Optional `Authorization` header configuration.
+authorization:
+ # Sets the authentication type.
+ [ type: <string> | default: Bearer ]
+ # Sets the credentials. It is mutually exclusive with
+ # `credentials_file`.
+ [ credentials: <secret> ]
+ # Sets the credentials with the credentials read from the configured file.
+ # It is mutually exclusive with `credentials`.
+ [ credentials_file: <filename> ]
+ [ credentials_ref: <string> ]
+
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+ [ <oauth2> ]
+
+tls_config:
+ [ <tls_config> ]
+
+[ follow_redirects: <boolean> | default: true ]
+
+# Whether to enable HTTP2.
+[ enable_http2: <boolean> | default: true ]
+
+# Optional proxy URL.
+[ proxy_url: <string> ]
+# Comma-separated string that can contain IPs, CIDR notation, domain names
+# that should be excluded from proxying. IP and domain names can
+# contain port numbers.
+[ no_proxy: <string> ]
+[ proxy_from_environment: <boolean> ]
+[ proxy_connect_header:
+ [ <string>: [ <secret>, ... ] ] ]
+
+# `http_headers` specifies a set of headers that will be injected into each request.
+http_headers:
+ [ <string>: <http_header> ]
+```
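For example, a sketch of injecting a single static header into every promtool request, using the `http_headers` and `values` keys from the schema above (the header name and value are hypothetical):

```yaml
http_headers:
  X-Scope-OrgID:        # hypothetical header name
    values:
      - tenant-1        # hypothetical header value
```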
+
+## \<oauth2\>
+OAuth 2.0 authentication using the client credentials grant type.
+```yaml
+# `client_id` and `client_secret` are used to authenticate your
+# application with the authorization server in order to get
+# an access token.
+# `client_secret`, `client_secret_file` and `client_secret_ref` are mutually exclusive.
+client_id: <string>
+[ client_secret: <secret> ]
+[ client_secret_file: <filename> ]
+[ client_secret_ref: <string> ]
+
+# `scopes` specify the reason for the resource access.
+scopes:
+ [ - <string> ... ]
+
+# The URL to fetch the token from.
+token_url: <string>
+
+# Optional parameters to append to the token URL.
+[ endpoint_params:
+ <string>: <string> ... ]
+
+# Configures the token request's TLS settings.
+tls_config:
+ [ <tls_config> ]
+
+# Optional proxy URL.
+[ proxy_url: <string> ]
+# Comma-separated string that can contain IPs, CIDR notation, domain names
+# that should be excluded from proxying. IP and domain names can
+# contain port numbers.
+[ no_proxy: <string> ]
+[ proxy_from_environment: <boolean> ]
+[ proxy_connect_header:
+ [ <string>: [ <secret>, ... ] ] ]
+```
+
+## <tls_config>
+```yaml
+# For the following configurations, use either `ca`, `cert` and `key` or `ca_file`, `cert_file` and `key_file` or use `ca_ref`, `cert_ref` or `key_ref`.
+# Text of the CA certificate to use for the server.
+[ ca: <string> ]
+# CA certificate to validate the server certificate with.
+[ ca_file: <filename> ]
+# `ca_ref` is the name of the secret within the secret manager to use as the CA cert.
+[ ca_ref: <string> ]
+
+# Text of the client cert file for the server.
+[ cert: <string> ]
+# Certificate file for client certificate authentication.
+[ cert_file: <filename> ]
+# `cert_ref` is the name of the secret within the secret manager to use as the client certificate.
+[ cert_ref: <string> ]
+
+# Text of the client key file for the server.
+[ key: <secret> ]
+# Key file for client certificate authentication.
+[ key_file: <filename> ]
+# `key_ref` is the name of the secret within the secret manager to use as the client key.
+[ key_ref: <string> ]
+
+# ServerName extension to indicate the name of the server.
+# http://tools.ietf.org/html/rfc4366#section-3.1
+[ server_name: <string> ]
+
+# Disable validation of the server certificate.
+[ insecure_skip_verify: <boolean> ]
+
+# Minimum acceptable TLS version. Accepted values: TLS10 (TLS 1.0), TLS11 (TLS
+# 1.1), TLS12 (TLS 1.2), TLS13 (TLS 1.3).
+# If unset, promtool will use Go default minimum version, which is TLS 1.2.
+# See MinVersion in https://pkg.go.dev/crypto/tls#Config.
+[ min_version: <string> ]
+# Maximum acceptable TLS version. Accepted values: TLS10 (TLS 1.0), TLS11 (TLS
+# 1.1), TLS12 (TLS 1.2), TLS13 (TLS 1.3).
+# If unset, promtool will use Go default maximum version, which is TLS 1.3.
+# See MaxVersion in https://pkg.go.dev/crypto/tls#Config.
+[ max_version: <string> ]
+```
+
+## \<http_header\>
+`header` represents the configuration for a single HTTP header.
+```yaml
+[ values:
+ [ - <string> ... ] ]
+
+[ secrets:
+ [ - <secret> ... ] ]
+
+[ files:
+ [ - <filename> ... ] ]
+```
diff --git a/docs/configuration/template_reference.md b/docs/configuration/template_reference.md
index 57f2606b13..300b8666a4 100644
--- a/docs/configuration/template_reference.md
+++ b/docs/configuration/template_reference.md
@@ -55,8 +55,10 @@ If functions are used in a pipeline, the pipeline value is passed as the last ar
| humanize1024 | number or string | string | Like `humanize`, but uses 1024 as the base rather than 1000. |
| humanizeDuration | number or string | string | Converts a duration in seconds to a more readable format. |
| humanizePercentage | number or string | string | Converts a ratio value to a fraction of 100. |
-| humanizeTimestamp | number or string | string | Converts a Unix timestamp in seconds to a more readable format. |
-| toTime | number or string | *time.Time | Converts a Unix timestamp in seconds to a time.Time. |
+| humanizeTimestamp | number or string | string | Converts a Unix timestamp in seconds to a more readable format. |
+| toTime | number or string | *time.Time | Converts a Unix timestamp in seconds to a time.Time. |
+| toDuration | number or string | *time.Duration | Converts a duration in seconds to a time.Duration. |
+| now | none | float64 | Returns the Unix timestamp in seconds at the time of the template evaluation. |
Humanizing functions are intended to produce reasonable output for consumption
by humans, and are not guaranteed to return the same results between Prometheus
diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index 08981ee931..4c390ab92e 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -181,6 +181,8 @@ This state is periodically ([`max_stale`][d2c]) cleared of inactive series.
Enabling this _can_ have negative impact on performance, because the in-memory
state is mutex guarded. Cumulative-only OTLP requests are not affected.
+[d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor
+
## PromQL arithmetic expressions in time durations
`--enable-feature=promql-duration-expr`
@@ -203,6 +205,12 @@ When using offset with duration expressions, you must wrap the expression in
parentheses. Without parentheses, only the first duration value will be used in
the offset calculation.
+`step()` can be used in duration expressions.
+For a **range query**, it resolves to the step width of the range query.
+For an **instant query**, it resolves to `0s`.
+
+`min(<duration>, <duration>)` and `max(<duration>, <duration>)` can be used to find the minimum or maximum of two duration expressions.
+
**Note**: Duration expressions are not supported in the @ timestamp operator.
The following operators are supported:
@@ -216,14 +224,16 @@ The following operators are supported:
Examples of equivalent durations:
-* `5m * 2` is the equivalent to `10m` or `600s`
-* `10m - 1m` is the equivalent to `9m` or `540s`
-* `(5+2) * 1m` is the equivalent to `7m` or `420s`
-* `1h / 2` is the equivalent to `30m` or `1800s`
-* `4h % 3h` is the equivalent to `1h` or `3600s`
-* `(2 ^ 3) * 1m` is the equivalent to `8m` or `480s`
+* `5m * 2` is equivalent to `10m` or `600s`
+* `10m - 1m` is equivalent to `9m` or `540s`
+* `(5+2) * 1m` is equivalent to `7m` or `420s`
+* `1h / 2` is equivalent to `30m` or `1800s`
+* `4h % 3h` is equivalent to `1h` or `3600s`
+* `(2 ^ 3) * 1m` is equivalent to `8m` or `480s`
+* `step() + 1` is equivalent to the query step width increased by 1s.
+* `max(step(), 5s)` is equivalent to the larger of the query step width and `5s`.
+* `min(2 * step() + 5s, 5m)` is equivalent to the smaller of twice the query step increased by `5s` and `5m`.
-[d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor
## OTLP Native Delta Support
diff --git a/docs/querying/operators.md b/docs/querying/operators.md
index 5e4f127b86..3f20c842f9 100644
--- a/docs/querying/operators.md
+++ b/docs/querying/operators.md
@@ -25,47 +25,58 @@ and vector/vector value pairs. They follow the usual [IEEE 754 floating point
arithmetic](https://en.wikipedia.org/wiki/IEEE_754), including the handling of
special values like `NaN`, `+Inf`, and `-Inf`.
-**Between two scalars**, the behavior is obvious: they evaluate to another
+**Between two scalars**, the behavior is straightforward: they evaluate to another
scalar that is the result of the operator applied to both scalar operands.
**Between an instant vector and a scalar**, the operator is applied to the
-value of every data sample in the vector. If the data sample is a float, the
-operation performed on the data sample is again obvious, e.g. if an instant
-vector of float samples is multiplied by 2, the result is another vector of
-float samples in which every sample value of the original vector is multiplied
-by 2. For vector elements that are histogram samples, the behavior is the
-following: For `*`, all bucket populations and the count and the sum of
-observations are multiplied by the scalar. For `/`, the histogram sample has to
-be on the left hand side (LHS), followed by the scalar on the right hand side
-(RHS). All bucket populations and the count and the sum of observations are
-then divided by the scalar. A division by zero results in a histogram with no
-regular buckets and the zero bucket population and the count and sum of
-observations all set to +Inf, -Inf, or NaN, depending on their values in the
-input histogram (positive, negative, or zero/NaN, respectively). For `/` with a
-scalar on the LHS and a histogram sample on the RHS, and similarly for all
-other arithmetic binary operators in any combination of a scalar and a
-histogram sample, there is no result and the corresponding element is removed
-from the resulting vector. Such a removal is flagged by an info-level
-annotation.
+value of every data sample in the vector.
+
+If the data sample is a float, the operation is performed between that float and the scalar.
+For example, if an instant vector of float samples is multiplied by 2,
+the result is another vector of float samples in which every sample value of
+the original vector is multiplied by 2.
+
+For vector elements that are histogram samples, the behavior is the
+following:
+
+* For `*`, all bucket populations and the count and the sum of observations
+ are multiplied by the scalar.
+
+* For `/`, the histogram sample has to be on the left hand side (LHS), followed
+ by the scalar on the right hand side (RHS). All bucket populations and the count
+ and the sum of observations are then divided by the scalar. A division by zero
+ results in a histogram with no regular buckets and the zero bucket population
+ and the count and sum of observations all set to `+Inf`, `-Inf`, or `NaN`, depending
+ on their values in the input histogram (positive, negative, or zero/`NaN`, respectively).
+
+* For `/` with a scalar on the LHS and a histogram sample on the RHS, and similarly for all
+ other arithmetic binary operators in any combination of a scalar and a
+ histogram sample, there is no result and the corresponding element is removed
+ from the resulting vector. Such a removal is flagged by an info-level
+ annotation.
**Between two instant vectors**, a binary arithmetic operator is applied to
each entry in the LHS vector and its [matching element](#vector-matching) in
the RHS vector. The result is propagated into the result vector with the
grouping labels becoming the output label set. Entries for which no matching
-entry in the right-hand vector can be found are not part of the result. If two
-float samples are matched, the behavior is obvious. If a float sample is
-matched with a histogram sample, the behavior follows the same logic as between
-a scalar and a histogram sample (see above), i.e. `*` and `/` (the latter with
-the histogram sample on the LHS) are valid operations, while all others lead to
-the removal of the corresponding element from the resulting vector. If two
-histogram samples are matched, only `+` and `-` are valid operations, each
-adding or substracting all matching bucket populations and the count and the
+entry in the right-hand vector can be found are not part of the result.
+
+If two float samples are matched, the arithmetic operator is applied to the two input values.
+
+If a float sample is matched with a histogram sample, the behavior follows the same
+logic as between a scalar and a histogram sample (see above), i.e. `*` and `/`
+(the latter with the histogram sample on the LHS) are valid operations, while all
+others lead to the removal of the corresponding element from the resulting vector.
+
+If two histogram samples are matched, only `+` and `-` are valid operations, each
+adding or subtracting all matching bucket populations and the count and the
sum of observations. All other operations result in the removal of the
corresponding element from the output vector, flagged by an info-level
annotation.
**In any arithmetic binary operation involving vectors**, the metric name is
-dropped.
+dropped. This occurs even if `__name__` is explicitly mentioned in `on`
+(see https://github.com/prometheus/prometheus/issues/16631 for further discussion).
### Trigonometric binary operators
@@ -102,8 +113,8 @@ operators result in another scalar that is either `0` (`false`) or `1`
**Between an instant vector and a scalar**, these operators are applied to the
value of every data sample in the vector, and vector elements between which the
-comparison result is `false` get dropped from the result vector. These
-operation only work with float samples in the vector. For histogram samples,
+comparison result is false get dropped from the result vector. These
+operations only work with float samples in the vector. For histogram samples,
the corresponding element is removed from the result vector, flagged by an
info-level annotation.
@@ -111,19 +122,33 @@ info-level annotation.
applied to matching entries. Vector elements for which the expression is not
true or which do not find a match on the other side of the expression get
dropped from the result, while the others are propagated into a result vector
-with the grouping labels becoming the output label set. Matches between two
-float samples work as usual, while matches between a float sample and a
-histogram sample are invalid. The corresponding element is removed from the
-result vector, flagged by an info-level annotation. Between two histogram
-samples, `==` and `!=` work as expected, but all other comparison binary
-operations are again invalid.
+with the grouping labels becoming the output label set.
+
+Matches between two float samples work as usual.
+
+Matches between a float sample and a histogram sample are invalid, and the
+corresponding element is removed from the result vector, flagged by an info-level
+annotation.
+
+Between two histogram samples, `==` and `!=` work as expected, but all other
+comparison binary operations are again invalid.
**In any comparison binary operation involving vectors**, providing the `bool`
-modifier changes the behavior in the following way: Vector elements that would
-be dropped instead have the value `0` and vector elements that would be kept
-have the value `1`. Additionally, the metric name is dropped. (Note that
-invalid operations involving histogram samples still return no result rather
-than the value `0`.)
+modifier changes the behavior in the following ways:
+
+* Vector elements which find a match on the other side of the expression but for
+ which the expression is false instead have the value `0` and vector elements
+ that do find a match and for which the expression is true have the value `1`.
+ (Note that elements with no match or invalid operations involving histogram
+ samples still return no result rather than the value `0`.)
+* The metric name is dropped.
+
+If the `bool` modifier is not provided, then the metric name from the left side
+is retained, with some exceptions:
+
+* If `on` is used, then the metric name is dropped.
+* If `group_right` is used, then the metric name from the right side is retained,
+ to avoid collisions.
### Logical/set binary operators
@@ -259,21 +284,21 @@ Prometheus supports the following built-in aggregation operators that can be
used to aggregate the elements of a single instant vector, resulting in a new
vector of fewer elements with aggregated values:
-* `sum` (calculate sum over dimensions)
-* `avg` (calculate the arithmetic average over dimensions)
-* `min` (select minimum over dimensions)
-* `max` (select maximum over dimensions)
-* `bottomk` (smallest _k_ elements by sample value)
-* `topk` (largest _k_ elements by sample value)
-* `limitk` (sample _k_ elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
-* `limit_ratio` (sample a pseudo-random ratio _r_ of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
-* `group` (all values in the resulting vector are 1)
-* `count` (count number of elements in the vector)
-* `count_values` (count number of elements with the same value)
+* `sum(v)` (calculate sum over dimensions)
+* `avg(v)` (calculate the arithmetic average over dimensions)
+* `min(v)` (select minimum over dimensions)
+* `max(v)` (select maximum over dimensions)
+* `bottomk(k, v)` (smallest `k` elements by sample value)
+* `topk(k, v)` (largest `k` elements by sample value)
+* `limitk(k, v)` (sample `k` elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
+* `limit_ratio(r, v)` (sample a pseudo-random ratio `r` of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
+* `group(v)` (all values in the resulting vector are 1)
+* `count(v)` (count number of elements in the vector)
+* `count_values(l, v)` (count number of elements with the same value)
-* `stddev` (calculate population standard deviation over dimensions)
-* `stdvar` (calculate population standard variance over dimensions)
-* `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
+* `stddev(v)` (calculate population standard deviation over dimensions)
+* `stdvar(v)` (calculate population standard variance over dimensions)
+* `quantile(φ, v)` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
These operators can either be used to aggregate over **all** label dimensions
or preserve distinct dimensions by including a `without` or `by` clause. These
@@ -293,29 +318,62 @@ all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.
-`parameter` is only required for `topk`, `bottomk`, `limitk`, `limit_ratio`,
-`quantile`, and `count_values`. It is used as the value for _k_, _r_, φ, or the
-name of the additional label, respectively.
-
### Detailed explanations
-`sum` sums up sample values in the same way as the `+` binary operator does
-between two values. Similarly, `avg` divides the sum by the number of
-aggregated samples in the same way as the `/` binary operator. Therefore, all
-sample values aggregation into a single resulting vector element must either be
+#### `sum`
+
+`sum(v)` sums up sample values in `v` in the same way as the `+` binary operator does
+between two values.
+
+All sample values being aggregated into a single resulting vector element must either be
float samples or histogram samples. An aggregation of a mix of both is invalid,
-resulting in the removeal of the corresponding vector element from the output
+resulting in the removal of the corresponding vector element from the output
vector, flagged by a warn-level annotation.
-`min` and `max` only operate on float samples, following IEEE 754 floating
+##### Examples
+
+If the metric `memory_consumption_bytes` had time series that fan out by
+`application`, `instance`, and `group` labels, we could calculate the total
+memory consumption per application and group over all instances via:
+
+ sum without (instance) (memory_consumption_bytes)
+
+This is equivalent to:
+
+ sum by (application, group) (memory_consumption_bytes)
+
+If we are just interested in the total memory consumption in **all**
+applications, we could simply write:
+
+ sum(memory_consumption_bytes)
+
+#### `avg`
+
+`avg(v)` divides the sum of `v` by the number of aggregated samples in the same way
+as the `/` binary operator.
+
+All sample values being aggregated into a single resulting vector element must either be
+float samples or histogram samples. An aggregation of a mix of both is invalid,
+resulting in the removal of the corresponding vector element from the output
+vector, flagged by a warn-level annotation.
+
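+##### Example
+
+Reusing the example metric from the `sum` section, we could calculate the
+average memory consumption per application and group over all instances via:
+
+    avg without (instance) (memory_consumption_bytes)
+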
+#### `min` and `max`
+
+`min(v)` and `max(v)` return the minimum or maximum value, respectively, in `v`.
+
+They only operate on float samples, following IEEE 754 floating
point arithmetic, which in particular implies that `NaN` is only ever
considered a minimum or maximum if all aggregated values are `NaN`. Histogram
samples in the input vector are ignored, flagged by an info-level annotation.
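+
+##### Example
+
+To get the highest memory consumption per application across all instances
+(again using the example metric from the `sum` section), we could write:
+
+    max by (application) (memory_consumption_bytes)
+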
-`topk` and `bottomk` are different from other aggregators in that a subset of
-the input samples, including the original labels, are returned in the result
-vector. `by` and `without` are only used to bucket the input vector. Similar to
-`min` and `max`, they only operate on float samples, considering `NaN` values
+#### `topk` and `bottomk`
+
+`topk(k, v)` and `bottomk(k, v)` are different from other aggregators in that
+they return a subset of `k` input samples, including the original labels, in
+the result vector.
+
+`by` and `without` are only used to bucket the input vector.
+
+Similar to `min` and `max`, they only operate on float samples, considering `NaN` values
to be farthest from the top or bottom, respectively. Histogram samples in the
input vector are ignored, flagged by an info-level annotation.
@@ -323,72 +381,108 @@ If used in an instant query, `topk` and `bottomk` return series ordered by
value in descending or ascending order, respectively. If used with `by` or
`without`, then series within each bucket are sorted by value, and series in
the same bucket are returned consecutively, but there is no guarantee that
-buckets of series will be returned in any particular order. No sorting applies
-to range queries.
+buckets of series will be returned in any particular order.
-`limitk` and `limit_ratio` also return a subset of the input samples, including
-the original labels in the result vector. The subset is selected in a
-deterministic pseudo-random way. `limitk` picks _k_ samples, while
-`limit_ratio` picks a ratio _r_ of samples (each determined by `parameter`).
-This happens independent of the sample type. Therefore, it works for both float
-samples and histogram samples. _r_ can be between +1 and -1. The absolute value
-of _r_ is used as the selection ratio, but the selection order is inverted for
-a negative _r_, which can be used to select complements. For example,
-`limit_ratio(0.1, ...)` returns a deterministic set of approximatiely 10% of
+No sorting applies to range queries.
+
+##### Example
+
+To get the 5 time series with the highest memory consumption across all instances, we could write:
+
+ topk(5, memory_consumption_bytes)
+
+#### `limitk`
+
+`limitk(k, v)` returns a subset of `k` input samples, including
+the original labels in the result vector.
+
+The subset is selected in a deterministic pseudo-random way.
+This happens independently of the sample type.
+Therefore, it works for both float samples and histogram samples.
+
+##### Example
+
+To sample 10 time series, we could write:
+
+ limitk(10, memory_consumption_bytes)
+
+#### `limit_ratio`
+
+`limit_ratio(r, v)` returns a subset of the input samples, including
+the original labels in the result vector.
+
+The subset is selected in a deterministic pseudo-random way.
+This happens independently of the sample type.
+Therefore, it works for both float samples and histogram samples.
+
+`r` can be between +1 and -1. The absolute value of `r` is used as the selection ratio,
+but the selection order is inverted for a negative `r`, which can be used to select complements.
+For example, `limit_ratio(0.1, ...)` returns a deterministic set of approximately 10% of
the input samples, while `limit_ratio(-0.9, ...)` returns precisely the
-remaining approximately 90% of the input samples not returned by
-`limit_ratio(0.1, ...)`.
+remaining approximately 90% of the input samples not returned by `limit_ratio(0.1, ...)`.
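+
+##### Example
+
+To deterministically sample roughly half of the series of the example metric,
+we could write:
+
+    limit_ratio(0.5, memory_consumption_bytes)
+
+The complementary half is then returned by:
+
+    limit_ratio(-0.5, memory_consumption_bytes)
+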
-`group` and `count` do not interact with the sample values,
-they work in the same way for float samples and histogram samples.
+#### `group`
+
+`group(v)` returns 1 for each group that contains any value at that timestamp.
+
+The value may be a float or histogram sample.
+
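+##### Example
+
+To get a series with value 1 for each application that has any series present,
+regardless of the sample values, we could write:
+
+    group by (application) (memory_consumption_bytes)
+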
+#### `count`
+
+`count(v)` returns the number of values at that timestamp, or no value at all
+if no values are present at that timestamp.
+
+The value may be a float or histogram sample.
+
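+##### Example
+
+To count the number of series (for example, one per instance and group)
+reporting memory consumption for each application, we could write:
+
+    count by (application) (memory_consumption_bytes)
+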
+#### `count_values`
+
+`count_values(l, v)` outputs one time series per unique sample value in `v`.
+Each series has an additional label whose name is given by `l` and whose value
+is the unique sample value. The value of each time series is the number of
+times that sample value was present.
-`count_values` outputs one time series per unique sample value. Each series has
-an additional label. The name of that label is given by the aggregation
-parameter, and the label value is the unique sample value. The value of each
-time series is the number of times that sample value was present.
`count_values` works with both float samples and histogram samples. For the
latter, a compact string representation of the histogram sample value is used
as the label value.
-`stddev` and `stdvar` only work with float samples, following IEEE 754 floating
-point arithmetic. Histogram samples in the input vector are ignored, flagged by
-an info-level annotation.
-
-`quantile` calculates the φ-quantile, the value that ranks at number φ*N among
-the N metric values of the dimensions aggregated over. φ is provided as the
-aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
-`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned.
-For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.
-
-### Examples
-
-If the metric `http_requests_total` had time series that fan out by
-`application`, `instance`, and `group` labels, we could calculate the total
-number of seen HTTP requests per application and group over all instances via:
-
- sum without (instance) (http_requests_total)
-
-Which is equivalent to:
-
- sum by (application, group) (http_requests_total)
-
-If we are just interested in the total of HTTP requests we have seen in **all**
-applications, we could simply write:
-
- sum(http_requests_total)
+##### Example
To count the number of binaries running each build version we could write:
count_values("version", build_version)
-To get the 5 largest HTTP requests counts across all instances we could write:
+#### `stddev`
- topk(5, http_requests_total)
+`stddev(v)` returns the standard deviation of `v`.
-To sample 10 timeseries, for example to inspect labels and their values, we
-could write:
+`stddev` only works with float samples, following IEEE 754 floating
+point arithmetic. Histogram samples in the input vector are ignored, flagged by
+an info-level annotation.
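+
+##### Example
+
+To get the standard deviation of memory consumption per application over all
+instances and groups, we could write:
+
+    stddev by (application) (memory_consumption_bytes)
+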
- limitk(10, http_requests_total)
+#### `stdvar`
+
+`stdvar(v)` returns the population standard variance of `v`.
+
+`stdvar` only works with float samples, following IEEE 754 floating
+point arithmetic. Histogram samples in the input vector are ignored, flagged by
+an info-level annotation.
+
+#### `quantile`
+
+`quantile(φ, v)` calculates the φ-quantile, the value that ranks at number φ*N among
+the N metric values of the dimensions aggregated over.
+
+`quantile` only works with float samples. Histogram samples in the input vector
+are ignored, flagged by an info-level annotation.
+
+`NaN` is considered the smallest possible value.
+
+For example, `quantile(0.5, ...)` calculates the median, `quantile(0.95, ...)` the 95th percentile.
+
+Special cases:
+
+* For φ = `NaN`, `NaN` is returned.
+* For φ < 0, `-Inf` is returned.
+* For φ > 1, `+Inf` is returned.
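+
+##### Example
+
+To get the 95th percentile of memory consumption per application over all
+instances and groups, we could write:
+
+    quantile by (application) (0.95, memory_consumption_bytes)
+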
## Binary operator precedence
diff --git a/documentation/examples/promtool-http-config-file.yml b/documentation/examples/promtool-http-config-file.yml
new file mode 100644
index 0000000000..ef07ab8250
--- /dev/null
+++ b/documentation/examples/promtool-http-config-file.yml
@@ -0,0 +1,12 @@
+# TLS and basic authentication configuration example.
+
+# For `basic_auth`, use the Prometheus credentials configured earlier in the `web-config.yml` file.
+# The password must be provided in plaintext.
+# To avoid including plaintext passwords directly in this file, consider using `password_file` or `password_ref` instead.
+basic_auth:
+ username: alice
+ password: verylongpassword
+
+tls_config:
+ cert_file: server.crt
+ key_file: server.key
diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet
index 3b661a14f4..adf5da5c12 100644
--- a/documentation/prometheus-mixin/dashboards.libsonnet
+++ b/documentation/prometheus-mixin/dashboards.libsonnet
@@ -445,6 +445,7 @@ local row = panel.row;
dashboard.new('%(prefix)sOverview' % $._config.grafanaPrometheus)
+ dashboard.time.withFrom('now-1h')
+ dashboard.withTags($._config.grafanaPrometheus.tags)
+ + dashboard.withUid('9fa0d141-d019-4ad7-8bc5-42196ee308bd')
+ dashboard.timepicker.withRefreshIntervals($._config.grafanaPrometheus.refresh)
+ dashboard.withVariables(std.prune([
datasourceVariable,
diff --git a/go.mod b/go.mod
index 908a3ca80b..9c87f44f7a 100644
--- a/go.mod
+++ b/go.mod
@@ -15,8 +15,8 @@ require (
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
- github.com/digitalocean/godo v1.152.0
- github.com/docker/docker v28.2.2+incompatible
+ github.com/digitalocean/godo v1.157.0
+ github.com/docker/docker v28.3.0+incompatible
github.com/edsrzf/mmap-go v1.2.0
github.com/envoyproxy/go-control-plane/envoy v1.32.4
github.com/envoyproxy/protoc-gen-validate v1.2.1
@@ -38,19 +38,19 @@ require (
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.18.0
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
- github.com/linode/linodego v1.52.1
+ github.com/linode/linodego v1.52.2
github.com/miekg/dns v1.1.66
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
- github.com/oklog/run v1.1.0
+ github.com/oklog/run v1.2.0
github.com/oklog/ulid/v2 v2.1.1
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0
- github.com/ovh/go-ovh v1.8.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0
+ github.com/ovh/go-ovh v1.9.0
github.com/prometheus/alertmanager v0.28.1
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.2
- github.com/prometheus/common v0.65.0
+ github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/exporter-toolkit v0.14.0
github.com/prometheus/sigv4 v0.2.0
@@ -59,10 +59,10 @@ require (
github.com/stackitcloud/stackit-sdk-go/core v0.17.2
github.com/stretchr/testify v1.10.0
github.com/vultr/govultr/v2 v2.17.2
- go.opentelemetry.io/collector/component v1.34.0
- go.opentelemetry.io/collector/consumer v1.34.0
- go.opentelemetry.io/collector/pdata v1.34.0
- go.opentelemetry.io/collector/processor v1.34.0
+ go.opentelemetry.io/collector/component v1.35.0
+ go.opentelemetry.io/collector/consumer v1.35.0
+ go.opentelemetry.io/collector/pdata v1.35.0
+ go.opentelemetry.io/collector/processor v1.35.0
go.opentelemetry.io/collector/semconv v0.128.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
@@ -81,7 +81,7 @@ require (
golang.org/x/sync v0.15.0
golang.org/x/sys v0.33.0
golang.org/x/text v0.26.0
- google.golang.org/api v0.238.0
+ google.golang.org/api v0.239.0
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
@@ -115,8 +115,8 @@ require (
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
- go.opentelemetry.io/collector/featuregate v1.34.0 // indirect
- go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
+ go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
go.opentelemetry.io/otel/log v0.12.2 // indirect
)
@@ -153,7 +153,7 @@ require (
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-resty/resty/v2 v2.16.5 // indirect
- github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+ github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
@@ -180,7 +180,7 @@ require (
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
- github.com/knadh/koanf/v2 v2.2.0 // indirect
+ github.com/knadh/koanf/v2 v2.2.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
@@ -197,8 +197,8 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
@@ -206,7 +206,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/otlptranslator v0.0.0-20250527173959-2573485683d5
+ github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
@@ -214,9 +214,9 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/collector/confmap v1.34.0 // indirect
- go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.128.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.35.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.129.0 // indirect
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
@@ -225,7 +225,7 @@ require (
golang.org/x/net v0.41.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/time v0.12.0 // indirect
- golang.org/x/tools v0.33.0 // indirect
+ golang.org/x/tools v0.34.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 134ebb90ec..41701a1bdc 100644
--- a/go.sum
+++ b/go.sum
@@ -110,14 +110,14 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/digitalocean/godo v1.152.0 h1:WRgkPMogZSXEJK70IkZKTB/PsMn16hMQ+NI3wCIQdzA=
-github.com/digitalocean/godo v1.152.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
+github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o=
+github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
-github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ=
+github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -176,8 +176,8 @@ github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
-github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
+github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -314,8 +314,8 @@ github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpb
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
-github.com/knadh/koanf/v2 v2.2.0 h1:FZFwd9bUjpb8DyCWARUBy5ovuhDs1lI87dOEn2K8UVU=
-github.com/knadh/koanf/v2 v2.2.0/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
+github.com/knadh/koanf/v2 v2.2.1 h1:jaleChtw85y3UdBnI0wCqcg1sj1gPoz6D3caGNHtrNE=
+github.com/knadh/koanf/v2 v2.2.1/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -329,8 +329,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.52.1 h1:HJ1cz1n9n3chRP9UrtqmP91+xTi0Q5l+H/4z4tpkwgQ=
-github.com/linode/linodego v1.52.1/go.mod h1:zEN2sX+cSdp67EuRY1HJiyuLujoa7HqvVwNEcJv3iXw=
+github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg=
+github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -395,8 +395,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM=
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
-github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
-github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=
+github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
@@ -405,20 +405,20 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 h1:hZa4FkI2JhYC0tkiwOepnHyyfWzezz3FfCmt88nWJa0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0/go.mod h1:sLbOuJEFckPdw4li0RtWpoSsMeppcck3s/cmzPyKAgc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0 h1:+rUULr4xqOJjZK3SokFmRYzsiPq5onoWoSv3He4aaus=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0/go.mod h1:Fh2SXPeFkr4J97w9CV/apFAib8TC9Hi0P08xtiT7Lng=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 h1:8OWwRSdIhm3DY3PEYJ0PtSEz1a1OjL0fghLXSr14JMk=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0/go.mod h1:32OeaysZe4vkSmD1LJ18Q1DfooryYqpSzFNmz+5A5RU=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 h1:9wVFaWEhgV8WQD+nP662nHNaQIkmyF57KRhtsqlaWEI=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0/go.mod h1:Yak3vQIvwYQiAO83u+zD9ujdCmpcDL7JSfg2YK+Mwn4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/ovh/go-ovh v1.8.0 h1:eQ5TAAFZvZAVarQir62oaTL+8a503pIBuOWVn72iGtY=
-github.com/ovh/go-ovh v1.8.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
+github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
+github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -454,14 +454,14 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
-github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSHCI8bjQusgCMSL+cE669w5/R2upq5WlPD0=
+github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
-github.com/prometheus/otlptranslator v0.0.0-20250527173959-2573485683d5 h1:LCbPeVKZSu9RS4CsaDCOmDCcribskJ8c6H5u1VvyxY0=
-github.com/prometheus/otlptranslator v0.0.0-20250527173959-2573485683d5/go.mod h1:v1PzmPjSnNkmZSDvKJ9OmsWcmWMEF5+JdllEcXrRfzM=
+github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 h1:QlySqDdSESgWDePeAYskbbcKKdowI26m9aU9zloHyYE=
+github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -522,40 +522,40 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/collector/component v1.34.0 h1:YONg7FaZ5zZbj5cLdARvwtMNuZHunuyxw2fWe5fcWqc=
-go.opentelemetry.io/collector/component v1.34.0/go.mod h1:GvolsSVZskXuyfQdwYacqeBSZe/1tg4RJ0YK55KSvDA=
-go.opentelemetry.io/collector/component/componentstatus v0.128.0 h1:0lEYHgUQEMMkl5FLtMgDH8lue4B3auElQINzGIWUya4=
-go.opentelemetry.io/collector/component/componentstatus v0.128.0/go.mod h1:8vVO6JSV+edmiezJsQzW7aKQ7sFLIN6S3JawKBI646o=
-go.opentelemetry.io/collector/component/componenttest v0.128.0 h1:MGNh5lQQ0Qmz2SmNwOqLJYaWMDkMLYj/51wjMzTBR34=
-go.opentelemetry.io/collector/component/componenttest v0.128.0/go.mod h1:hALNxcacqOaX/Gm/dE7sNOxAEFj41SbRqtvF57Yd6gs=
-go.opentelemetry.io/collector/confmap v1.34.0 h1:PG4sYlLxgCMnA5F7daKXZV+NKjU1IzXBzVQeyvcwyh0=
-go.opentelemetry.io/collector/confmap v1.34.0/go.mod h1:BbAit8+hAJg5vyFBQoDh9vOXOH8UzCdNu91jCh+b72E=
-go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 h1:hcVKU45pjC+PLz7xUc8kwSlR5wsN2w8hs9midZ3ez10=
-go.opentelemetry.io/collector/confmap/xconfmap v0.128.0/go.mod h1:2928x4NAAu1CysfzLbEJE6MSSDB/gOYVq6YRGWY9LmM=
-go.opentelemetry.io/collector/consumer v1.34.0 h1:oBhHH6mgViOGhVDPozE+sUdt7jFBo2Hh32lsSr2L3Tc=
-go.opentelemetry.io/collector/consumer v1.34.0/go.mod h1:DVMCb56ZBlPNcmo0lSJKn3rp18oyZQCedRE4GKIMI+Q=
-go.opentelemetry.io/collector/consumer/consumertest v0.128.0 h1:x50GB0I/QvU3sQuNCap5z/P2cnq2yHoRJ/8awkiT87w=
-go.opentelemetry.io/collector/consumer/consumertest v0.128.0/go.mod h1:Wb3IAbMY/DOIwJPy81PuBiW2GnKoNIz4THE7wfJwovE=
-go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 h1:4E+KTdCjkRS3SIw0bsv5kpv9XFXHf8x9YiPEuxBVEHY=
-go.opentelemetry.io/collector/consumer/xconsumer v0.128.0/go.mod h1:OmzilL/qbjCzPMHay+WEA7/cPe5xuX7Jbj5WPIpqaMo=
-go.opentelemetry.io/collector/featuregate v1.34.0 h1:zqDHpEYy1UeudrfUCvlcJL2t13dXywrC6lwpNZ5DrCU=
-go.opentelemetry.io/collector/featuregate v1.34.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
-go.opentelemetry.io/collector/internal/telemetry v0.128.0 h1:ySEYWoY7J8DAYdlw2xlF0w+ODQi3AhYj7TRNflsCbx8=
-go.opentelemetry.io/collector/internal/telemetry v0.128.0/go.mod h1:572B/iJqjauv3aT+zcwnlNWBPqM7+KqrYGSUuOAStrM=
-go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8=
-go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI=
-go.opentelemetry.io/collector/pdata/pprofile v0.128.0 h1:6DEtzs/liqv/ukz2EHbC5OMaj2V6K2pzuj/LaRg2YmY=
-go.opentelemetry.io/collector/pdata/pprofile v0.128.0/go.mod h1:bVVRpz+zKFf1UCCRUFqy8LvnO3tHlXKkdqW2d+Wi/iA=
-go.opentelemetry.io/collector/pdata/testdata v0.128.0 h1:5xcsMtyzvb18AnS2skVtWreQP1nl6G3PiXaylKCZ6pA=
-go.opentelemetry.io/collector/pdata/testdata v0.128.0/go.mod h1:9/VYVgzv3JMuIyo19KsT3FwkVyxbh3Eg5QlabQEUczA=
-go.opentelemetry.io/collector/pipeline v0.128.0 h1:WgNXdFbyf/QRLy5XbO/jtPQosWrSWX/TEnSYpJq8bgI=
-go.opentelemetry.io/collector/pipeline v0.128.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
-go.opentelemetry.io/collector/processor v1.34.0 h1:5pwXIG12XXxdkJ8F68e2cBEjEnFlCIAZhqEYM7vjkqE=
-go.opentelemetry.io/collector/processor v1.34.0/go.mod h1:VCl4vYj2tdO4APUcr0q6Eh796mqCCsH9Z/gqaPuzlUs=
-go.opentelemetry.io/collector/processor/processortest v0.128.0 h1:xPhOSmGFDGqhC3/nu1BqPSE6EpDPAf1/F+BfaYjDn/8=
-go.opentelemetry.io/collector/processor/processortest v0.128.0/go.mod h1:XXXom+mbAQtrkcvq4Ecd6n8RQoVgcfLe1vrUlr6U2gI=
-go.opentelemetry.io/collector/processor/xprocessor v0.128.0 h1:ObbtdXab0is6bdt4XabsRJZ+SUTuwQjPVlHTbmScfNg=
-go.opentelemetry.io/collector/processor/xprocessor v0.128.0/go.mod h1:/nHXW15nzwSRQ+25Cb+r17he/uMtCEvSOBGqpDbn3Uk=
+go.opentelemetry.io/collector/component v1.35.0 h1:JpvBukEcEUvJ/TInF1KYpXtWEP+C7iYkxCHKjI0o7BQ=
+go.opentelemetry.io/collector/component v1.35.0/go.mod h1:hU/ieWPxWbMAacODCSqem5ZaN6QH9W5GWiZ3MtXVuwc=
+go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw=
+go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI=
+go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI=
+go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM=
+go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ=
+go.opentelemetry.io/collector/confmap v1.35.0/go.mod h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0=
+go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk=
+go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw=
+go.opentelemetry.io/collector/consumer v1.35.0 h1:mgS42yh1maXBIE65IT4//iOA89BE+7xSUzV8czyevHg=
+go.opentelemetry.io/collector/consumer v1.35.0/go.mod h1:9sSPX0hDHaHqzR2uSmfLOuFK9v3e9K3HRQ+fydAjOWs=
+go.opentelemetry.io/collector/consumer/consumertest v0.129.0 h1:kRmrAgVvPxH5c/rTaOYAzyy0YrrYhQpBNkuqtDRrgeU=
+go.opentelemetry.io/collector/consumer/consumertest v0.129.0/go.mod h1:JgJKms1+v/CuAjkPH+ceTnKeDgUUGTQV4snGu5wTEHY=
+go.opentelemetry.io/collector/consumer/xconsumer v0.129.0 h1:bRyJ9TGWwnrUnB5oQGTjPhxpVRbkIVeugmvks22bJ4A=
+go.opentelemetry.io/collector/consumer/xconsumer v0.129.0/go.mod h1:pbe5ZyPJrtzdt/RRI0LqfT1GVBiJLbtkDKx3SBRTiTY=
+go.opentelemetry.io/collector/featuregate v1.35.0 h1:c/XRtA35odgxVc4VgOF/PTIk7ajw1wYdQ6QI562gzd4=
+go.opentelemetry.io/collector/featuregate v1.35.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
+go.opentelemetry.io/collector/internal/telemetry v0.129.0 h1:jkzRpIyMxMGdAzVOcBe8aRNrbP7eUrMq6cxEHe0sbzA=
+go.opentelemetry.io/collector/internal/telemetry v0.129.0/go.mod h1:riAPlR2LZBV7VEx4LicOKebg3N1Ja3izzkv5fl1Lhiw=
+go.opentelemetry.io/collector/pdata v1.35.0 h1:ck6WO6hCNjepADY/p9sT9/rLECTLO5ukYTumKzsqB/E=
+go.opentelemetry.io/collector/pdata v1.35.0/go.mod h1:pttpb089864qG1k0DMeXLgwwTFLk+o3fAW9I6MF9tzw=
+go.opentelemetry.io/collector/pdata/pprofile v0.129.0 h1:DgZTvjOGmyZRx7Or80hz8XbEaGwHPkIh2SX1A5eXttQ=
+go.opentelemetry.io/collector/pdata/pprofile v0.129.0/go.mod h1:uUBZxqJNOk6QIMvbx30qom//uD4hXJ1K/l3qysijMLE=
+go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY=
+go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY=
+go.opentelemetry.io/collector/pipeline v0.129.0 h1:Mp7RuKLizLQJ0381eJqKQ0zpgkFlhTE9cHidpJQIvMU=
+go.opentelemetry.io/collector/pipeline v0.129.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
+go.opentelemetry.io/collector/processor v1.35.0 h1:YOfHemhhodYn4BnPjN7kWYYDhzPVqRkyHCaQ8mAlavs=
+go.opentelemetry.io/collector/processor v1.35.0/go.mod h1:cWHDOpmpAaVNCc9K9j2/okZoLIuP/EpGGRNhM4JGmFM=
+go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY=
+go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc=
+go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc=
+go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00=
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw=
@@ -675,14 +675,14 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
-golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
+golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
+golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.238.0 h1:+EldkglWIg/pWjkq97sd+XxH7PxakNYoe/rkSTbnvOs=
-google.golang.org/api v0.238.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
+google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo=
+google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index 046a272340..05803fedaf 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -6,7 +6,7 @@ require (
github.com/bufbuild/buf v1.51.0
github.com/daixiang0/gci v0.13.6
github.com/gogo/protobuf v1.3.2
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1
)
require (
@@ -18,7 +18,7 @@ require (
buf.build/go/bufplugin v0.8.0 // indirect
buf.build/go/protoyaml v0.3.1 // indirect
buf.build/go/spdx v0.2.0 // indirect
- cel.dev/expr v0.21.2 // indirect
+ cel.dev/expr v0.23.0 // indirect
connectrpc.com/connect v1.18.1 // indirect
connectrpc.com/otelconnect v0.7.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
@@ -97,19 +97,19 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
- golang.org/x/crypto v0.36.0 // indirect
+ golang.org/x/crypto v0.38.0 // indirect
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect
- golang.org/x/mod v0.24.0 // indirect
- golang.org/x/net v0.38.0 // indirect
- golang.org/x/sync v0.12.0 // indirect
- golang.org/x/sys v0.31.0 // indirect
- golang.org/x/term v0.30.0 // indirect
- golang.org/x/text v0.23.0 // indirect
- golang.org/x/tools v0.31.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
- google.golang.org/grpc v1.70.0 // indirect
- google.golang.org/protobuf v1.36.5 // indirect
+ golang.org/x/mod v0.25.0 // indirect
+ golang.org/x/net v0.40.0 // indirect
+ golang.org/x/sync v0.15.0 // indirect
+ golang.org/x/sys v0.33.0 // indirect
+ golang.org/x/term v0.32.0 // indirect
+ golang.org/x/text v0.26.0 // indirect
+ golang.org/x/tools v0.33.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
+ google.golang.org/grpc v1.73.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
pluginrpc.com/pluginrpc v0.5.0 // indirect
)
diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index 54a200b5a6..544b227c79 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -14,8 +14,8 @@ buf.build/go/protoyaml v0.3.1 h1:ucyzE7DRnjX+mQ6AH4JzN0Kg50ByHHu+yrSKbgQn2D4=
buf.build/go/protoyaml v0.3.1/go.mod h1:0TzNpFQDXhwbkXb/ajLvxIijqbve+vMQvWY/b3/Dzxg=
buf.build/go/spdx v0.2.0 h1:IItqM0/cMxvFJJumcBuP8NrsIzMs/UYjp/6WSpq8LTw=
buf.build/go/spdx v0.2.0/go.mod h1:bXdwQFem9Si3nsbNy8aJKGPoaPi5DKwdeEp5/ArZ6w8=
-cel.dev/expr v0.21.2 h1:o+Wj235dy4gFYlYin3JsMpp3EEfMrPm/6tdoyjT98S0=
-cel.dev/expr v0.21.2/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss=
+cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
connectrpc.com/otelconnect v0.7.2 h1:WlnwFzaW64dN06JXU+hREPUGeEzpz3Acz2ACOmN8cMI=
@@ -112,8 +112,8 @@ github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 h1:+J3r2e8+RsmN3vKfo7
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
@@ -260,25 +260,25 @@ go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
+golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c=
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
-golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -288,34 +288,34 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
+golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
-golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
+golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
-google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
-google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
-google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
+google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/model/labels/labels.go b/model/labels/labels.go
index 2e8dc52cb3..a6e5654fa7 100644
--- a/model/labels/labels.go
+++ b/model/labels/labels.go
@@ -248,6 +248,17 @@ func (ls Labels) WithoutEmpty() Labels {
return ls
}
+// ByteSize returns the approximate size of the labels in bytes, including
+// the size of the two string headers for name and value.
+// Slice header size is ignored because it should be amortized to zero.
+func (ls Labels) ByteSize() uint64 {
+ var size uint64 = 0
+ for _, l := range ls {
+ size += uint64(len(l.Name)+len(l.Value)) + 2*uint64(unsafe.Sizeof(""))
+ }
+ return size
+}
+
// Equal returns whether the two label sets are equal.
func Equal(ls, o Labels) bool {
return slices.Equal(ls, o)
diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go
index 64e0a69b83..edc6ff8e82 100644
--- a/model/labels/labels_dedupelabels.go
+++ b/model/labels/labels_dedupelabels.go
@@ -417,6 +417,13 @@ func (ls Labels) WithoutEmpty() Labels {
return ls
}
+// ByteSize returns the approximate size of the labels in bytes.
+// String header size is ignored because it should be amortized to zero.
+// SymbolTable size is also not taken into account.
+func (ls Labels) ByteSize() uint64 {
+ return uint64(len(ls.data))
+}
+
// Equal returns whether the two label sets are equal.
func Equal(a, b Labels) bool {
if a.syms == b.syms {
diff --git a/model/labels/labels_dedupelabels_test.go b/model/labels/labels_dedupelabels_test.go
index 5ef9255c21..229bb45a8e 100644
--- a/model/labels/labels_dedupelabels_test.go
+++ b/model/labels/labels_dedupelabels_test.go
@@ -21,6 +21,24 @@ import (
"github.com/stretchr/testify/require"
)
+var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels.
+ 16,
+ 0,
+ 41,
+ 270,
+ 271,
+ 325,
+}
+
+var expectedByteSize = []uint64{ // Values must line up with testCaseLabels.
+ 8,
+ 0,
+ 8,
+ 8,
+ 8,
+ 32,
+}
+
func TestVarint(t *testing.T) {
cases := []struct {
v int
diff --git a/model/labels/labels_slicelabels_test.go b/model/labels/labels_slicelabels_test.go
new file mode 100644
index 0000000000..2d592ef5b5
--- /dev/null
+++ b/model/labels/labels_slicelabels_test.go
@@ -0,0 +1,27 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build slicelabels
+
+package labels
+
+var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels.
+ 72,
+ 0,
+ 97,
+ 326,
+ 327,
+ 549,
+}
+
+var expectedByteSize = expectedSizeOfLabels // They are identical
diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go
index a2b16cac76..4b9bfd15af 100644
--- a/model/labels/labels_stringlabels.go
+++ b/model/labels/labels_stringlabels.go
@@ -283,6 +283,13 @@ func (ls Labels) WithoutEmpty() Labels {
return ls
}
+// ByteSize returns the approximate size of the labels in bytes.
+// The string header size is ignored because it should be amortized to zero,
+// since the underlying data may be shared across multiple copies of the Labels.
+func (ls Labels) ByteSize() uint64 {
+ return uint64(len(ls.data))
+}
+
// Equal returns whether the two label sets are equal.
func Equal(ls, o Labels) bool {
return ls.data == o.data
diff --git a/model/labels/labels_stringlabels_test.go b/model/labels/labels_stringlabels_test.go
new file mode 100644
index 0000000000..0704a2ff36
--- /dev/null
+++ b/model/labels/labels_stringlabels_test.go
@@ -0,0 +1,34 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !slicelabels && !dedupelabels
+
+package labels
+
+var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels.
+ 12,
+ 0,
+ 37,
+ 266,
+ 270,
+ 309,
+}
+
+var expectedByteSize = []uint64{ // Values must line up with testCaseLabels.
+ 12,
+ 0,
+ 37,
+ 266,
+ 270,
+ 309,
+}
diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go
index d9368ccfc3..4b23748a91 100644
--- a/model/labels/labels_test.go
+++ b/model/labels/labels_test.go
@@ -26,37 +26,33 @@ import (
"gopkg.in/yaml.v2"
)
+var (
+ s254 = strings.Repeat("x", 254) // Edge cases for stringlabels encoding.
+ s255 = strings.Repeat("x", 255)
+)
+
+var testCaseLabels = []Labels{
+ FromStrings("t1", "t1", "t2", "t2"),
+ {},
+ FromStrings("service.name", "t1", "whatever\\whatever", "t2"),
+ FromStrings("aaa", "111", "xx", s254),
+ FromStrings("aaa", "111", "xx", s255),
+ FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+}
+
func TestLabels_String(t *testing.T) {
- s254 := strings.Repeat("x", 254) // Edge cases for stringlabels encoding.
- s255 := strings.Repeat("x", 255)
- cases := []struct {
- labels Labels
- expected string
- }{
- {
- labels: FromStrings("t1", "t1", "t2", "t2"),
- expected: "{t1=\"t1\", t2=\"t2\"}",
- },
- {
- labels: Labels{},
- expected: "{}",
- },
- {
- labels: FromStrings("service.name", "t1", "whatever\\whatever", "t2"),
- expected: `{"service.name"="t1", "whatever\\whatever"="t2"}`,
- },
- {
- labels: FromStrings("aaa", "111", "xx", s254),
- expected: `{aaa="111", xx="` + s254 + `"}`,
- },
- {
- labels: FromStrings("aaa", "111", "xx", s255),
- expected: `{aaa="111", xx="` + s255 + `"}`,
- },
+ expected := []string{ // Values must line up with testCaseLabels.
+ "{t1=\"t1\", t2=\"t2\"}",
+ "{}",
+ `{"service.name"="t1", "whatever\\whatever"="t2"}`,
+ `{aaa="111", xx="` + s254 + `"}`,
+ `{aaa="111", xx="` + s255 + `"}`,
+ `{" container"="prometheus", " namespace"="observability-prometheus", __name__="kube_pod_container_status_last_terminated_exitcode", cluster="prod-af-north-0", instance="kube-state-metrics-0:kube-state-metrics:ksm", job="kube-state-metrics/kube-state-metrics", pod="observability-prometheus-0", uid="d3ec90b2-4975-4607-b45d-b9ad64bb417e"}`,
}
- for _, c := range cases {
- str := c.labels.String()
- require.Equal(t, c.expected, str)
+ require.Len(t, expected, len(testCaseLabels))
+ for i, c := range expected {
+ str := testCaseLabels[i].String()
+ require.Equal(t, c, str)
}
}
@@ -67,6 +63,44 @@ func BenchmarkString(b *testing.B) {
}
}
+func TestSizeOfLabels(t *testing.T) {
+ require.Len(t, expectedSizeOfLabels, len(testCaseLabels))
+ for i, c := range expectedSizeOfLabels { // Declared in build-tag-specific files, e.g. labels_slicelabels_test.go.
+ var total uint64
+ testCaseLabels[i].Range(func(l Label) {
+ total += SizeOfLabels(l.Name, l.Value, 1)
+ })
+ require.Equal(t, c, total)
+ }
+}
+
+func TestByteSize(t *testing.T) {
+ require.Len(t, expectedByteSize, len(testCaseLabels))
+ for i, c := range expectedByteSize { // Declared in build-tag-specific files, e.g. labels_slicelabels_test.go.
+ require.Equal(t, c, testCaseLabels[i].ByteSize())
+ }
+}
+
+var GlobalTotal uint64 // Encourage the compiler not to elide the benchmark computation.
+
+func BenchmarkSize(b *testing.B) {
+ lb := New(benchmarkLabels...)
+ b.Run("SizeOfLabels", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var total uint64
+ lb.Range(func(l Label) {
+ total += SizeOfLabels(l.Name, l.Value, 1)
+ })
+ GlobalTotal = total
+ }
+ })
+ b.Run("ByteSize", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ GlobalTotal = lb.ByteSize()
+ }
+ })
+}
+
func TestLabels_MatchLabels(t *testing.T) {
labels := FromStrings(
"__name__", "ALERTS",
diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go
index e8ac3077bd..cc873011cb 100644
--- a/model/rulefmt/rulefmt_test.go
+++ b/model/rulefmt/rulefmt_test.go
@@ -49,7 +49,7 @@ sum without(instance) (rate(requests_total[5m]))
require.Equal(t, "HighAlert", rg.Rules[2].Alert)
require.Equal(t, "critical", rg.Rules[2].Labels["severity"])
- require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[0].Annotations["description"])
+ require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[2].Annotations["description"])
require.Equal(t, "HighAlert2", rg.Rules[3].Alert)
require.Equal(t, "critical", rg.Rules[3].Labels["severity"])
diff --git a/notifier/manager.go b/notifier/manager.go
index 69ce9b221b..c9463b24a8 100644
--- a/notifier/manager.go
+++ b/notifier/manager.go
@@ -254,7 +254,10 @@ func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group)
select {
case <-n.stopRequested:
return
- case ts := <-tsets:
+ case ts, ok := <-tsets:
+ if !ok {
+ break
+ }
n.reload(ts)
}
}
diff --git a/promql/durations.go b/promql/durations.go
index 8431fa5bd4..c882adfbb6 100644
--- a/promql/durations.go
+++ b/promql/durations.go
@@ -21,14 +21,25 @@ import (
"github.com/prometheus/prometheus/promql/parser"
)
-// durationVisitor is a visitor that visits a duration expression and calculates the duration.
-type durationVisitor struct{}
+// durationVisitor is a visitor that calculates the concrete value of
+// duration expressions in AST nodes. For example, the query
+// "http_requests_total offset (1h / 2)" is represented in the AST
+// as a VectorSelector with OriginalOffset 0 and the duration expression
+// (1h / 2) stored in OriginalOffsetExpr. This visitor evaluates such
+// duration expressions, setting OriginalOffset to 30m in this example.
+type durationVisitor struct {
+ step time.Duration
+}
+// Visit finds any duration expressions in AST nodes and modifies the node to
+// store the concrete value. Note that parser.Walk does NOT traverse duration
+// expressions such as OriginalOffsetExpr, so we make our own recursive call
+// on those to evaluate the result.
func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visitor, error) {
switch n := node.(type) {
case *parser.VectorSelector:
if n.OriginalOffsetExpr != nil {
- duration, err := calculateDuration(n.OriginalOffsetExpr, true)
+ duration, err := v.calculateDuration(n.OriginalOffsetExpr, true)
if err != nil {
return nil, err
}
@@ -36,7 +47,7 @@ func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visit
}
case *parser.MatrixSelector:
if n.RangeExpr != nil {
- duration, err := calculateDuration(n.RangeExpr, false)
+ duration, err := v.calculateDuration(n.RangeExpr, false)
if err != nil {
return nil, err
}
@@ -44,21 +55,21 @@ func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visit
}
case *parser.SubqueryExpr:
if n.OriginalOffsetExpr != nil {
- duration, err := calculateDuration(n.OriginalOffsetExpr, true)
+ duration, err := v.calculateDuration(n.OriginalOffsetExpr, true)
if err != nil {
return nil, err
}
n.OriginalOffset = duration
}
if n.StepExpr != nil {
- duration, err := calculateDuration(n.StepExpr, false)
+ duration, err := v.calculateDuration(n.StepExpr, false)
if err != nil {
return nil, err
}
n.Step = duration
}
if n.RangeExpr != nil {
- duration, err := calculateDuration(n.RangeExpr, false)
+ duration, err := v.calculateDuration(n.RangeExpr, false)
if err != nil {
return nil, err
}
@@ -68,9 +79,10 @@ func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visit
return v, nil
}
-// calculateDuration computes the duration from a duration expression.
-func calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) {
- duration, err := evaluateDurationExpr(expr)
+// calculateDuration evaluates a duration expression and returns the result
+// as a time.Duration, or an error if the duration is invalid.
+func (v *durationVisitor) calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) {
+ duration, err := v.evaluateDurationExpr(expr)
if err != nil {
return 0, err
}
@@ -84,7 +96,7 @@ func calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, e
}
// evaluateDurationExpr recursively evaluates a duration expression to a float64 value.
-func evaluateDurationExpr(expr parser.Expr) (float64, error) {
+func (v *durationVisitor) evaluateDurationExpr(expr parser.Expr) (float64, error) {
switch n := expr.(type) {
case *parser.NumberLiteral:
return n.Val, nil
@@ -93,19 +105,31 @@ func evaluateDurationExpr(expr parser.Expr) (float64, error) {
var err error
if n.LHS != nil {
- lhs, err = evaluateDurationExpr(n.LHS)
+ lhs, err = v.evaluateDurationExpr(n.LHS)
if err != nil {
return 0, err
}
}
- rhs, err = evaluateDurationExpr(n.RHS)
- if err != nil {
- return 0, err
+ if n.RHS != nil {
+ rhs, err = v.evaluateDurationExpr(n.RHS)
+ if err != nil {
+ return 0, err
+ }
}
switch n.Op {
+	case parser.STEP:
+		return v.step.Seconds(), nil
+ case parser.MIN:
+ return math.Min(lhs, rhs), nil
+ case parser.MAX:
+ return math.Max(lhs, rhs), nil
case parser.ADD:
+ if n.LHS == nil {
+ // Unary positive duration expression.
+ return rhs, nil
+ }
return lhs + rhs, nil
case parser.SUB:
if n.LHS == nil {
diff --git a/promql/durations_test.go b/promql/durations_test.go
index 0cdfb7597a..18592a0d0a 100644
--- a/promql/durations_test.go
+++ b/promql/durations_test.go
@@ -195,6 +195,24 @@ func TestCalculateDuration(t *testing.T) {
expected: -5 * time.Second,
allowedNegative: true,
},
+ {
+ name: "step",
+ expr: &parser.DurationExpr{
+ Op: parser.STEP,
+ },
+ expected: 1 * time.Second,
+ },
+ {
+ name: "step multiplication",
+ expr: &parser.DurationExpr{
+ LHS: &parser.DurationExpr{
+ Op: parser.STEP,
+ },
+ RHS: &parser.NumberLiteral{Val: 3},
+ Op: parser.MUL,
+ },
+ expected: 3 * time.Second,
+ },
{
name: "division by zero",
expr: &parser.DurationExpr{
@@ -225,7 +243,8 @@ func TestCalculateDuration(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- result, err := calculateDuration(tt.expr, tt.allowedNegative)
+ v := &durationVisitor{step: 1 * time.Second}
+ result, err := v.calculateDuration(tt.expr, tt.allowedNegative)
if tt.errorMessage != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.errorMessage)
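
As a worked example of the new step() handling (a sketch that mirrors the "step multiplication" test case above and only compiles inside package promql, since durationVisitor is unexported; the 30-second step and the resulting 90s are illustrative):

	v := &durationVisitor{step: 30 * time.Second}
	d, err := v.calculateDuration(&parser.DurationExpr{
		LHS: &parser.DurationExpr{Op: parser.STEP},
		RHS: &parser.NumberLiteral{Val: 3},
		Op:  parser.MUL,
	}, false)
	// d is 90 * time.Second (step * 3) and err is nil.
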
diff --git a/promql/engine.go b/promql/engine.go
index adf49ed96e..f5ee591d3b 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -86,11 +86,6 @@ type engineMetrics struct {
querySamples prometheus.Counter
}
-// convertibleToInt64 returns true if v does not over-/underflow an int64.
-func convertibleToInt64(v float64) bool {
- return v <= maxInt64 && v >= minInt64
-}
-
type (
// ErrQueryTimeout is returned if a query timed out during processing.
ErrQueryTimeout string
@@ -134,7 +129,7 @@ type QueryLogger interface {
io.Closer
}
-// A Query is derived from an a raw query string and can be run against an engine
+// A Query is derived from a raw query string and can be run against an engine
// it is associated with.
type Query interface {
// Exec processes the query. Can only be called once.
@@ -481,7 +476,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
// NewInstantQuery returns an evaluation query for the given expression at the given time.
func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) {
- pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0)
+ pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0*time.Second)
finishQueue, err := ng.queueActive(ctx, qry)
if err != nil {
return nil, err
@@ -494,7 +489,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts
if err := ng.validateOpts(expr); err != nil {
return nil, err
}
- *pExpr, err = PreprocessExpr(expr, ts, ts)
+ *pExpr, err = PreprocessExpr(expr, ts, ts, 0)
return qry, err
}
@@ -518,7 +513,7 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q
if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar {
return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type()))
}
- *pExpr, err = PreprocessExpr(expr, start, end)
+ *pExpr, err = PreprocessExpr(expr, start, end, interval)
return qry, err
}
@@ -1433,6 +1428,15 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
if params.Max() < 1 {
return nil, annos
}
+ if params.HasAnyNaN() {
+ ev.errorf("Parameter value is NaN")
+ }
+ if fParam := params.Min(); fParam <= minInt64 {
+ ev.errorf("Scalar value %v underflows int64", fParam)
+ }
+ if fParam := params.Max(); fParam >= maxInt64 {
+ ev.errorf("Scalar value %v overflows int64", fParam)
+ }
seriess = make(map[uint64]Series, len(inputMatrix))
case parser.LIMIT_RATIO:
@@ -1440,6 +1444,9 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
if params.Max() == 0 && params.Min() == 0 {
return nil, annos
}
+ if params.HasAnyNaN() {
+ ev.errorf("Ratio value is NaN")
+ }
if params.Max() > 1.0 {
annos.Add(annotations.NewInvalidRatioWarning(params.Max(), 1.0, aggExpr.Param.PositionRange()))
}
@@ -3325,9 +3332,6 @@ seriesLoop:
var r float64
switch op {
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
- if !convertibleToInt64(fParam) {
- ev.errorf("Scalar value %v overflows int64", fParam)
- }
k = int64(fParam)
if k > int64(len(inputMatrix)) {
k = int64(len(inputMatrix))
@@ -3339,9 +3343,6 @@ seriesLoop:
return nil, annos
}
case parser.LIMIT_RATIO:
- if math.IsNaN(fParam) {
- ev.errorf("Ratio value %v is NaN", fParam)
- }
switch {
case fParam == 0:
if enh.Ts != ev.endTimestamp {
@@ -3730,10 +3731,10 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
// PreprocessExpr wraps all possible step invariant parts of the given expression with
// StepInvariantExpr. It also resolves the preprocessors and evaluates duration expressions
// into their numeric values.
-func PreprocessExpr(expr parser.Expr, start, end time.Time) (parser.Expr, error) {
+func PreprocessExpr(expr parser.Expr, start, end time.Time, step time.Duration) (parser.Expr, error) {
detectHistogramStatsDecoding(expr)
- if err := parser.Walk(&durationVisitor{}, expr, nil); err != nil {
+ if err := parser.Walk(&durationVisitor{step: step}, expr, nil); err != nil {
return nil, err
}
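
A hedged sketch of how a caller threads the query step through the new PreprocessExpr signature (the query string is the one used in the durationVisitor comment; start, end and the 30-second step are placeholders):

	expr, err := parser.ParseExpr("http_requests_total offset (1h / 2)")
	if err != nil {
		return nil, err
	}
	// Range queries pass their resolution step so step() inside duration
	// expressions can be resolved; instant queries pass 0 (see above).
	expr, err = promql.PreprocessExpr(expr, start, end, 30*time.Second)
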
diff --git a/promql/engine_test.go b/promql/engine_test.go
index f352d5999c..ce5ef6efd7 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -3088,7 +3088,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
t.Run(test.input, func(t *testing.T) {
expr, err := parser.ParseExpr(test.input)
require.NoError(t, err)
- expr, err = promql.PreprocessExpr(expr, startTime, endTime)
+ expr, err = promql.PreprocessExpr(expr, startTime, endTime, 0)
require.NoError(t, err)
if test.outputTest {
require.Equal(t, test.input, expr.String(), "error on input '%s'", test.input)
diff --git a/promql/functions.go b/promql/functions.go
index 9af904c9e6..2577e7f27b 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -144,32 +144,37 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
// (which is our guess for where the series actually starts or ends).
extrapolationThreshold := averageDurationBetweenSamples * 1.1
- extrapolateToInterval := sampledInterval
-
if durationToStart >= extrapolationThreshold {
durationToStart = averageDurationBetweenSamples / 2
}
- if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 {
+ if isCounter {
// Counters cannot be negative. If we have any slope at all
// (i.e. resultFloat went up), we can extrapolate the zero point
// of the counter. If the duration to the zero point is shorter
// than the durationToStart, we take the zero point as the start
// of the series, thereby avoiding extrapolation to negative
// counter values.
- // TODO(beorn7): Do this for histograms, too.
- durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat)
+ durationToZero := durationToStart
+ if resultFloat > 0 &&
+ len(samples.Floats) > 0 &&
+ samples.Floats[0].F >= 0 {
+ durationToZero = sampledInterval * (samples.Floats[0].F / resultFloat)
+ } else if resultHistogram != nil &&
+ resultHistogram.Count > 0 &&
+ len(samples.Histograms) > 0 &&
+ samples.Histograms[0].H.Count >= 0 {
+ durationToZero = sampledInterval * (samples.Histograms[0].H.Count / resultHistogram.Count)
+ }
if durationToZero < durationToStart {
durationToStart = durationToZero
}
}
- extrapolateToInterval += durationToStart
if durationToEnd >= extrapolationThreshold {
durationToEnd = averageDurationBetweenSamples / 2
}
- extrapolateToInterval += durationToEnd
- factor := extrapolateToInterval / sampledInterval
+ factor := (sampledInterval + durationToStart + durationToEnd) / sampledInterval
if isRate {
factor /= ms.Range.Seconds()
}
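
A worked example of the zero-point clamp this hunk generalizes to histograms (numbers are illustrative): over a 60s sampled interval, if the first float sample is 5 and the computed increase (resultFloat) is 20, then durationToZero = 60s * (5 / 20) = 15s; an estimated durationToStart of 30s is clamped to 15s, so the rate is never extrapolated below the counter's zero point. When the result is a histogram, the same formula now uses the first sample's Count and the result histogram's Count.
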
diff --git a/promql/parser/ast.go b/promql/parser/ast.go
index 9eebaed9ab..dc3e36b5b5 100644
--- a/promql/parser/ast.go
+++ b/promql/parser/ast.go
@@ -116,7 +116,8 @@ type DurationExpr struct {
LHS, RHS Expr // The operands on the respective sides of the operator.
Wrapped bool // Set when the duration is wrapped in parentheses.
- StartPos posrange.Pos // For unary operations, the position of the operator.
+ StartPos posrange.Pos // For unary operations and step(), the start position of the operator.
+ EndPos posrange.Pos // For step(), the end position of the operator.
}
// Call represents a function call.
@@ -455,6 +456,18 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange {
}
func (e *DurationExpr) PositionRange() posrange.PositionRange {
+ if e.Op == STEP {
+ return posrange.PositionRange{
+ Start: e.StartPos,
+ End: e.EndPos,
+ }
+ }
+ if e.RHS == nil {
+ return posrange.PositionRange{
+ Start: e.StartPos,
+			End:   e.LHS.PositionRange().End,
+ }
+ }
if e.LHS == nil {
return posrange.PositionRange{
Start: e.StartPos,
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index fc901374e2..e7e16cd033 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -150,6 +150,7 @@ WITHOUT
%token <item>
START
END
+STEP
%token preprocessorEnd
// Counter reset hints.
@@ -174,7 +175,7 @@ START_METRIC_SELECTOR
// Type definitions for grammar rules.
%type <matchers> label_match_list
%type <matcher> label_matcher
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint min_max
%type <labels> label_set metric
%type <lblList> label_set_list
%type