diff --git a/.github/workflows/automerge-dependabot.yml b/.github/workflows/automerge-dependabot.yml
index 616e4ee8b6..8b07f4df95 100644
--- a/.github/workflows/automerge-dependabot.yml
+++ b/.github/workflows/automerge-dependabot.yml
@@ -19,7 +19,7 @@ jobs:
steps:
- name: Dependabot metadata
id: metadata
- uses: dependabot/fetch-metadata@08eff52bf64351f401fb50d4972fa95b9f2c2d1b # v2.4.0
+ uses: dependabot/fetch-metadata@21025c705c08248db411dc16f3619e6b5f9ea21a # v2.5.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
- name: Enable auto-merge for Dependabot PRs
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 87b6fb90a0..b09b65619e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -130,6 +130,27 @@ jobs:
- run: make -C documentation/prometheus-mixin
- run: git diff --exit-code
+ test-compliance:
+ name: Compliance testing
+ runs-on: ubuntu-latest
+ container:
+ # Whenever the Go version is updated here, .promu.yml
+ # should also be updated.
+ image: quay.io/prometheus/golang-builder:1.25-base
+ steps:
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+ with:
+ persist-credentials: false
+ - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
+ - uses: ./.github/promci/actions/setup_environment
+ with:
+ enable_npm: false
+      # NOTE: These tests are based on https://github.com/prometheus/compliance and
+ # are executed against the ./cmd/prometheus main package.
+ - run: go test -skip ${SKIP_TESTS} -v --tags=compliance ./compliance/...
+ env:
+ SKIP_TESTS: "TestRemoteWriteSender/prometheus/samples/rw2/start_timestamp*" # TODO(bwplotka): PROM-60
+
build:
name: Build Prometheus for common architectures
runs-on: ubuntu-latest
@@ -254,6 +275,12 @@ jobs:
with:
args: --verbose --build-tags=dedupelabels
version: ${{ steps.golangci-lint-version.outputs.version }}
+ - name: Lint in documentation/examples/remote_storage
+ uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
+ with:
+ args: --verbose
+ working-directory: documentation/examples/remote_storage
+ version: ${{ steps.golangci-lint-version.outputs.version }}
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
@@ -310,7 +337,7 @@ jobs:
persist-credentials: false
- uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
- name: Install nodejs
- uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
+ uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 8dfa6049f2..ac636c1797 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -29,12 +29,12 @@ jobs:
persist-credentials: false
- name: Initialize CodeQL
- uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
+ uses: github/codeql-action/init@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
with:
languages: ${{ matrix.language }}
- name: Autobuild
- uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
+ uses: github/codeql-action/autobuild@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
+ uses: github/codeql-action/analyze@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 81dcbf5c2a..242f0a8ae8 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
+ uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
with:
sarif_file: results.sarif
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 947e670fd8..b29097c400 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -11,7 +11,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
+ - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# opt out of defaults to avoid marking issues as stale and closing them
diff --git a/.golangci.yml b/.golangci.yml
index 8cb3265f4f..ff37050211 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -39,6 +39,7 @@ linters:
- predeclared
- revive
- sloglint
+ - staticcheck
- testifylint
- unconvert
- unused
@@ -128,8 +129,6 @@ linters:
# Disable this check for now since it introduces too many changes in our existing codebase.
# See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
- omitzero
- # Disable waitgroup check until we really move to Go 1.25.
- - waitgroup
perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 2e4a982382..ae61059af5 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -6,6 +6,7 @@ General maintainers:
* Bryan Boreham (bjboreham@gmail.com / @bboreham)
* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
* Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
+* György Krajcsovits ( / @krajorama)
Maintainers for specific parts of the codebase:
* `cmd`
@@ -18,7 +19,7 @@ Maintainers for specific parts of the codebase:
* `storage`
* `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Alex Greenbank ( / @alexgreenbank)
* `otlptranslator`: Arthur Silva Sens ( / @ArthurSens), Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez)
-* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez), George Krajcsovits ( / @krajorama)
+* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez)
* `web`
* `ui`: Julius Volz ( / @juliusv)
* `module`: Augustin Husson ( / @nexucis)
diff --git a/Makefile.common b/Makefile.common
index b8c9b3844c..18f20f79ab 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v2.7.2
+GOLANGCI_LINT_VERSION ?= v2.10.1
GOLANGCI_FMT_OPTS ?=
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 3910991148..39e6676731 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -1193,9 +1193,11 @@ func main() {
func() error {
<-reloadReady.C
ruleManager.Run()
+ logger.Info("Rule manager stopped")
return nil
},
func(error) {
+			logger.Info("Stopping rule manager...")
ruleManager.Stop()
},
)
@@ -1230,9 +1232,11 @@ func main() {
func() error {
<-reloadReady.C
tracingManager.Run()
+ logger.Info("Tracing manager stopped")
return nil
},
func(error) {
+ logger.Info("Stopping tracing manager...")
tracingManager.Stop()
},
)
@@ -1309,6 +1313,7 @@ func main() {
checksum = currentChecksum
}
case <-cancel:
+ logger.Info("Reloaders stopped")
return nil
}
}
@@ -1316,6 +1321,7 @@ func main() {
func(error) {
// Wait for any in-progress reloads to complete to avoid
// reloading things after they have been shutdown.
+ logger.Info("Stopping reloaders...")
cancel <- struct{}{}
},
)
@@ -1399,9 +1405,11 @@ func main() {
db.SetWriteNotified(remoteStorage)
close(dbOpen)
<-cancel
+ logger.Info("TSDB stopped")
return nil
},
func(error) {
+ logger.Info("Stopping storage...")
if err := fanoutStorage.Close(); err != nil {
logger.Error("Error stopping storage", "err", err)
}
@@ -1456,9 +1464,11 @@ func main() {
db.SetWriteNotified(remoteStorage)
close(dbOpen)
<-cancel
+ logger.Info("Agent WAL storage stopped")
return nil
},
func(error) {
+ logger.Info("Stopping agent WAL storage...")
if err := fanoutStorage.Close(); err != nil {
logger.Error("Error stopping storage", "err", err)
}
@@ -1473,9 +1483,11 @@ func main() {
if err := webHandler.Run(ctxWeb, listeners, *webConfig); err != nil {
return fmt.Errorf("error starting web server: %w", err)
}
+ logger.Info("Web handler stopped")
return nil
},
func(error) {
+ logger.Info("Stopping web handler...")
cancelWeb()
},
)
@@ -1498,6 +1510,7 @@ func main() {
return nil
},
func(error) {
+ logger.Info("Stopping notifier manager...")
notifierManager.Stop()
},
)
diff --git a/cmd/prometheus/testdata/features.json b/cmd/prometheus/testdata/features.json
index c39f60ab33..ce7dbbaebe 100644
--- a/cmd/prometheus/testdata/features.json
+++ b/cmd/prometheus/testdata/features.json
@@ -80,6 +80,7 @@
"histogram_count": true,
"histogram_fraction": true,
"histogram_quantile": true,
+ "histogram_quantiles": false,
"histogram_stddev": true,
"histogram_stdvar": true,
"histogram_sum": true,
@@ -185,6 +186,7 @@
"dockerswarm": true,
"ec2": true,
"ecs": true,
+ "elasticache": true,
"eureka": true,
"file": true,
"gce": true,
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index abb709c31d..4dc6c7615f 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -372,7 +372,7 @@ func main() {
os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
case checkConfigCmd.FullCommand():
- os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.UTF8Validation, model.Duration(*checkLookbackDelta)), *configFiles...))
+ os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.UTF8Validation, model.Duration(*checkLookbackDelta)), promtoolParser, *configFiles...))
case checkServerHealthCmd.FullCommand():
os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper)))
@@ -598,7 +598,7 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht
}
// CheckConfig validates configuration files.
-func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, files ...string) int {
+func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, p parser.Parser, files ...string) int {
failed := false
hasErrors := false
@@ -619,7 +619,7 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig,
if !checkSyntaxOnly {
scrapeConfigsFailed := lintScrapeConfigs(scrapeConfigs, lintSettings)
failed = failed || scrapeConfigsFailed
- rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig, parser.NewParser(parser.Options{}))
+ rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig, p)
failed = failed || rulesFailed
hasErrors = hasErrors || rulesHaveErrors
}
@@ -948,11 +948,11 @@ func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings rulesLintConfig) (int
dRules := checkDuplicates(rgs.Groups)
if len(dRules) != 0 {
var errMessage strings.Builder
- errMessage.WriteString(fmt.Sprintf("%d duplicate rule(s) found.\n", len(dRules)))
+ fmt.Fprintf(&errMessage, "%d duplicate rule(s) found.\n", len(dRules))
for _, n := range dRules {
- errMessage.WriteString(fmt.Sprintf("Metric: %s\nLabel(s):\n", n.metric))
+ fmt.Fprintf(&errMessage, "Metric: %s\nLabel(s):\n", n.metric)
n.label.Range(func(l labels.Label) {
- errMessage.WriteString(fmt.Sprintf("\t%s: %s\n", l.Name, l.Value))
+ fmt.Fprintf(&errMessage, "\t%s: %s\n", l.Name, l.Value)
})
}
errMessage.WriteString("Might cause inconsistency while recording expressions")
diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go
index 3b1730d894..297dd35d70 100644
--- a/cmd/promtool/main_test.go
+++ b/cmd/promtool/main_test.go
@@ -706,20 +706,21 @@ func TestCheckScrapeConfigs(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
// Non-fatal linting.
- code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ p := parser.NewParser(parser.Options{})
+ code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
require.Equal(t, successExitCode, code, "Non-fatal linting should return success")
// Fatal linting.
- code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
if tc.expectError {
require.Equal(t, lintErrExitCode, code, "Fatal linting should return error")
} else {
require.Equal(t, successExitCode, code, "Fatal linting should return success when there are no problems")
}
// Check syntax only, no linting.
- code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
require.Equal(t, successExitCode, code, "Fatal linting should return success when checking syntax only")
// Lint option "none" should disable linting.
- code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, model.UTF8Validation, tc.lookbackDelta), p, "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
require.Equal(t, successExitCode, code, `Fatal linting should return success when lint option "none" is specified`)
})
}
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 1aaf87bc42..f43da0e1d0 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -159,17 +159,14 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
batch := lbls[:l]
lbls = lbls[l:]
- wg.Add(1)
- go func() {
- defer wg.Done()
-
+ wg.Go(func() {
n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
if err != nil {
// exitWithError(err)
fmt.Println(" err", err)
}
total.Add(n)
- }()
+ })
}
wg.Wait()
}
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index 7e3db94501..dab452af64 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -255,6 +255,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
Context: context.Background(),
NotifyFunc: func(context.Context, string, ...*rules.Alert) {},
Logger: promslog.NewNopLogger(),
+ Parser: tg.parser,
}
m := rules.NewManager(opts)
groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ignoreUnknownFields, ruleFiles...)
@@ -560,9 +561,9 @@ Outer:
// seriesLoadingString returns the input series in PromQL notation.
func (tg *testGroup) seriesLoadingString() string {
var result strings.Builder
- result.WriteString(fmt.Sprintf("load %v\n", shortDuration(tg.Interval)))
+ fmt.Fprintf(&result, "load %v\n", shortDuration(tg.Interval))
for _, is := range tg.InputSeries {
- result.WriteString(fmt.Sprintf(" %v %v\n", is.Series, is.Values))
+ fmt.Fprintf(&result, " %v %v\n", is.Series, is.Values)
}
return result.String()
}
diff --git a/compliance/go.mod b/compliance/go.mod
new file mode 100644
index 0000000000..54adc20b6c
--- /dev/null
+++ b/compliance/go.mod
@@ -0,0 +1,26 @@
+module compliance
+
+go 1.25.5
+
+require github.com/prometheus/compliance/remotewrite v0.0.0-20260220101514-bccaa3a70275
+
+require (
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/snappy v1.0.0 // indirect
+ github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 // indirect
+ github.com/klauspost/compress v1.18.1 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/oklog/run v1.2.0 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.67.2 // indirect
+ github.com/prometheus/prometheus v0.307.4-0.20251119130332-1174b0ce4f1f // indirect
+ github.com/stretchr/testify v1.11.1 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ golang.org/x/text v0.30.0 // indirect
+ google.golang.org/protobuf v1.36.10 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/compliance/go.sum b/compliance/go.sum
new file mode 100644
index 0000000000..6f273f49bd
--- /dev/null
+++ b/compliance/go.sum
@@ -0,0 +1,79 @@
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=
+github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a h1:RF1vfKM34/3DbGNis22BGd6sDDY3XBi0eM7pYqmOEO0=
+github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a/go.mod h1:FGJuwvfcPY0V5enm+w8zF1RNS062yugQtPPQp1c4Io4=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.67.2 h1:PcBAckGFTIHt2+L3I33uNRTlKTplNzFctXcWhPyAEN8=
+github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko=
+github.com/prometheus/compliance/remotewrite v0.0.0-20260220101514-bccaa3a70275 h1:NLTtFqM00EuqtisYX9P+hQkjoxNxsR2oUQWDluyD2Xw=
+github.com/prometheus/compliance/remotewrite v0.0.0-20260220101514-bccaa3a70275/go.mod h1:VEPZGvpSBbzTKc5acnBj9ng4gfo1DZ4qBsCQnoNFiSc=
+github.com/prometheus/prometheus v0.307.4-0.20251119130332-1174b0ce4f1f h1:ERPCnBglv9Z4IjkEBTNbcHmZPlryMldXVWLkk7TeBIY=
+github.com/prometheus/prometheus v0.307.4-0.20251119130332-1174b0ce4f1f/go.mod h1:7hcXiGf9AXIKW2ehWWzxkvRYJTGmc2StUIJ8mprfxjg=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/compliance/remote_write_sender_test.go b/compliance/remote_write_sender_test.go
new file mode 100644
index 0000000000..6840132bd3
--- /dev/null
+++ b/compliance/remote_write_sender_test.go
@@ -0,0 +1,93 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compliance
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "html/template"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/prometheus/compliance/remotewrite/sender"
+)
+
+const (
+ scrapeConfigTemplate = `
+global:
+ scrape_interval: 1s
+
+remote_write:
+ - url: "{{.RemoteWriteEndpointURL}}"
+ protobuf_message: "{{.RemoteWriteMessage}}"
+ send_exemplars: true
+ queue_config:
+ retry_on_http_429: true
+ metadata_config:
+ send: true
+
+scrape_configs:
+ - job_name: "{{.ScrapeTargetJobName}}"
+ scrape_interval: 1s
+ scrape_protocols:
+ - PrometheusProto
+ - OpenMetricsText1.0.0
+ - PrometheusText0.0.4
+ static_configs:
+ - targets: ["{{.ScrapeTargetHostPort}}"]
+`
+)
+
+var scrapeConfigTmpl = template.Must(template.New("config").Parse(scrapeConfigTemplate))
+
+type internalPrometheus struct{}
+
+func (p internalPrometheus) Name() string { return "internal-prometheus" }
+
+// Run runs a cmd/prometheus main package as a test sender target, until ctx is done.
+func (p internalPrometheus) Run(ctx context.Context, opts sender.Options) error {
+ var buf bytes.Buffer
+ if err := scrapeConfigTmpl.Execute(&buf, opts); err != nil {
+ return fmt.Errorf("failed to execute config template: %w", err)
+ }
+
+ dir, err := os.MkdirTemp("", "test-*")
+ if err != nil {
+ return err
+ }
+ configFile := filepath.Join(dir, "config.yaml")
+ if err := os.WriteFile(configFile, buf.Bytes(), 0o600); err != nil {
+ return err
+ }
+ defer os.RemoveAll(dir)
+
+ return sender.RunCommand(ctx, "../cmd/prometheus", nil,
+ "go", "run", ".",
+ "--web.listen-address=0.0.0.0:0",
+ fmt.Sprintf("--storage.tsdb.path=%v", dir),
+ fmt.Sprintf("--config.file=%s", configFile),
+ // Set important flags for the full remote write compliance:
+ "--enable-feature=st-storage",
+ )
+}
+
+var _ sender.Sender = internalPrometheus{}
+
+// TestRemoteWriteSender runs remote write sender compliance tests defined in
+// https://github.com/prometheus/compliance/tree/main/remotewrite/sender
+func TestRemoteWriteSender(t *testing.T) {
+ sender.RunTests(t, internalPrometheus{}, sender.ComplianceTests())
+}
diff --git a/discovery/aws/aws.go b/discovery/aws/aws.go
index 69b3b41c06..f0f9c3d4df 100644
--- a/discovery/aws/aws.go
+++ b/discovery/aws/aws.go
@@ -43,10 +43,11 @@ type Role string
// The valid options for Role.
const (
- RoleEC2 Role = "ec2"
- RoleECS Role = "ecs"
- RoleLightsail Role = "lightsail"
- RoleMSK Role = "msk"
+ RoleEC2 Role = "ec2"
+ RoleECS Role = "ecs"
+ RoleElasticache Role = "elasticache"
+ RoleLightsail Role = "lightsail"
+ RoleMSK Role = "msk"
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -55,7 +56,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
return err
}
switch *c {
- case RoleEC2, RoleECS, RoleLightsail, RoleMSK:
+ case RoleEC2, RoleECS, RoleElasticache, RoleLightsail, RoleMSK:
return nil
default:
return fmt.Errorf("unknown AWS SD role %q", *c)
@@ -86,10 +87,11 @@ type SDConfig struct {
Clusters []string `yaml:"clusters,omitempty"`
// Embedded sub-configs (internal use only, not serialized)
- *EC2SDConfig `yaml:"-"`
- *ECSSDConfig `yaml:"-"`
- *LightsailSDConfig `yaml:"-"`
- *MSKSDConfig `yaml:"-"`
+ *EC2SDConfig `yaml:"-"`
+ *ECSSDConfig `yaml:"-"`
+ *ElasticacheSDConfig `yaml:"-"`
+ *LightsailSDConfig `yaml:"-"`
+ *MSKSDConfig `yaml:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface for SDConfig.
@@ -172,6 +174,37 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
if c.Clusters != nil {
c.ECSSDConfig.Clusters = c.Clusters
}
+ case RoleElasticache:
+ if c.ElasticacheSDConfig == nil {
+ elasticacheConfig := DefaultElasticacheSDConfig
+ c.ElasticacheSDConfig = &elasticacheConfig
+ }
+ c.ElasticacheSDConfig.HTTPClientConfig = c.HTTPClientConfig
+ c.ElasticacheSDConfig.Region = c.Region
+ if c.Endpoint != "" {
+ c.ElasticacheSDConfig.Endpoint = c.Endpoint
+ }
+ if c.AccessKey != "" {
+ c.ElasticacheSDConfig.AccessKey = c.AccessKey
+ }
+ if c.SecretKey != "" {
+ c.ElasticacheSDConfig.SecretKey = c.SecretKey
+ }
+ if c.Profile != "" {
+ c.ElasticacheSDConfig.Profile = c.Profile
+ }
+ if c.RoleARN != "" {
+ c.ElasticacheSDConfig.RoleARN = c.RoleARN
+ }
+ if c.Port != 0 {
+ c.ElasticacheSDConfig.Port = c.Port
+ }
+ if c.RefreshInterval != 0 {
+ c.ElasticacheSDConfig.RefreshInterval = c.RefreshInterval
+ }
+ if c.Clusters != nil {
+ c.ElasticacheSDConfig.Clusters = c.Clusters
+ }
case RoleLightsail:
if c.LightsailSDConfig == nil {
lightsailConfig := DefaultLightsailSDConfig
@@ -259,6 +292,9 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
case RoleECS:
opts.Metrics = &ecsMetrics{refreshMetrics: awsMetrics.refreshMetrics}
return NewECSDiscovery(c.ECSSDConfig, opts)
+ case RoleElasticache:
+ opts.Metrics = &elasticacheMetrics{refreshMetrics: awsMetrics.refreshMetrics}
+ return NewElasticacheDiscovery(c.ElasticacheSDConfig, opts)
case RoleLightsail:
opts.Metrics = &lightsailMetrics{refreshMetrics: awsMetrics.refreshMetrics}
return NewLightsailDiscovery(c.LightsailSDConfig, opts)
diff --git a/discovery/aws/elasticache.go b/discovery/aws/elasticache.go
new file mode 100644
index 0000000000..7ed598e294
--- /dev/null
+++ b/discovery/aws/elasticache.go
@@ -0,0 +1,907 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "maps"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/service/elasticache"
+ "github.com/aws/aws-sdk-go-v2/service/elasticache/types"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/prometheus/prometheus/discovery"
+ "github.com/prometheus/prometheus/discovery/refresh"
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/util/strutil"
+)
+
+const (
+	// Base prefix for all ElastiCache SD meta labels (__meta_elasticache_...).
+	elasticacheLabel                 = model.MetaLabelPrefix + "elasticache_"
+	elasticacheLabelDeploymentOption = elasticacheLabel + "deployment_option"
+
+	// cache cluster.
+	elasticacheLabelCacheCluster                                   = elasticacheLabel + "cache_cluster_"
+	elasticacheLabelCacheClusterARN                                = elasticacheLabelCacheCluster + "arn"
+	elasticacheLabelCacheClusterAtRestEncryptionEnabled            = elasticacheLabelCacheCluster + "at_rest_encryption_enabled"
+	elasticacheLabelCacheClusterAuthTokenEnabled                   = elasticacheLabelCacheCluster + "auth_token_enabled"
+	elasticacheLabelCacheClusterAuthTokenLastModified              = elasticacheLabelCacheCluster + "auth_token_last_modified"
+	elasticacheLabelCacheClusterAutoMinorVersionUpgrade            = elasticacheLabelCacheCluster + "auto_minor_version_upgrade"
+	elasticacheLabelCacheClusterCreateTime                         = elasticacheLabelCacheCluster + "cache_cluster_create_time"
+	elasticacheLabelCacheClusterID                                 = elasticacheLabelCacheCluster + "cache_cluster_id"
+	elasticacheLabelCacheClusterStatus                             = elasticacheLabelCacheCluster + "cache_cluster_status"
+	elasticacheLabelCacheClusterNodeType                           = elasticacheLabelCacheCluster + "cache_node_type"
+	elasticacheLabelCacheClusterParameterGroup                     = elasticacheLabelCacheCluster + "cache_parameter_group"
+	elasticacheLabelCacheClusterSubnetGroupName                    = elasticacheLabelCacheCluster + "cache_subnet_group_name"
+	elasticacheLabelCacheClusterClientDownloadLandingPage          = elasticacheLabelCacheCluster + "client_download_landing_page"
+	elasticacheLabelCacheClusterEngine                             = elasticacheLabelCacheCluster + "engine"
+	elasticacheLabelCacheClusterEngineVersion                      = elasticacheLabelCacheCluster + "engine_version"
+	elasticacheLabelCacheClusterIPDiscovery                        = elasticacheLabelCacheCluster + "ip_discovery"
+	elasticacheLabelCacheClusterNetworkType                        = elasticacheLabelCacheCluster + "network_type"
+	elasticacheLabelCacheClusterNumCacheNodes                      = elasticacheLabelCacheCluster + "num_cache_nodes"
+	elasticacheLabelCacheClusterPreferredAvailabilityZone          = elasticacheLabelCacheCluster + "preferred_availability_zone"
+	elasticacheLabelCacheClusterPreferredMaintenanceWindow         = elasticacheLabelCacheCluster + "preferred_maintenance_window"
+	elasticacheLabelCacheClusterPreferredOutpostARN                = elasticacheLabelCacheCluster + "preferred_outpost_arn"
+	elasticacheLabelCacheClusterReplicationGroupID                 = elasticacheLabelCacheCluster + "replication_group_id"
+	elasticacheLabelCacheClusterReplicationGroupLogDeliveryEnabled = elasticacheLabelCacheCluster + "replication_group_log_delivery_enabled"
+	elasticacheLabelCacheClusterSnapshotRetentionLimit             = elasticacheLabelCacheCluster + "snapshot_retention_limit"
+	elasticacheLabelCacheClusterSnapshotWindow                     = elasticacheLabelCacheCluster + "snapshot_window"
+	elasticacheLabelCacheClusterTransitEncryptionEnabled           = elasticacheLabelCacheCluster + "transit_encryption_enabled"
+	elasticacheLabelCacheClusterTransitEncryptionMode              = elasticacheLabelCacheCluster + "transit_encryption_mode"
+
+	// configuration endpoint.
+	elasticacheLabelCacheClusterConfigurationEndpoint        = elasticacheLabelCacheCluster + "configuration_endpoint_"
+	elasticacheLabelCacheClusterConfigurationEndpointAddress = elasticacheLabelCacheClusterConfigurationEndpoint + "address"
+	elasticacheLabelCacheClusterConfigurationEndpointPort    = elasticacheLabelCacheClusterConfigurationEndpoint + "port"
+
+	// notification.
+	elasticacheLabelCacheClusterNotification            = elasticacheLabelCacheCluster + "notification_"
+	elasticacheLabelCacheClusterNotificationTopicARN    = elasticacheLabelCacheClusterNotification + "topic_arn"
+	elasticacheLabelCacheClusterNotificationTopicStatus = elasticacheLabelCacheClusterNotification + "topic_status"
+
+	// log delivery configuration (slice - use with index).
+	elasticacheLabelCacheClusterLogDeliveryConfiguration               = elasticacheLabelCacheCluster + "log_delivery_configuration_"
+	elasticacheLabelCacheClusterLogDeliveryConfigurationDestinationType = elasticacheLabelCacheClusterLogDeliveryConfiguration + "destination_type"
+	elasticacheLabelCacheClusterLogDeliveryConfigurationLogFormat      = elasticacheLabelCacheClusterLogDeliveryConfiguration + "log_format"
+	elasticacheLabelCacheClusterLogDeliveryConfigurationLogType        = elasticacheLabelCacheClusterLogDeliveryConfiguration + "log_type"
+	elasticacheLabelCacheClusterLogDeliveryConfigurationStatus         = elasticacheLabelCacheClusterLogDeliveryConfiguration + "status"
+	elasticacheLabelCacheClusterLogDeliveryConfigurationMessage        = elasticacheLabelCacheClusterLogDeliveryConfiguration + "message"
+	elasticacheLabelCacheClusterLogDeliveryConfigurationLogGroup       = elasticacheLabelCacheClusterLogDeliveryConfiguration + "log_group"
+	elasticacheLabelCacheClusterLogDeliveryConfigurationDeliveryStream = elasticacheLabelCacheClusterLogDeliveryConfiguration + "delivery_stream"
+
+	// pending modified values.
+	elasticacheLabelCacheClusterPendingModifiedValues                          = elasticacheLabelCacheCluster + "pending_modified_values_"
+	elasticacheLabelCacheClusterPendingModifiedValuesAuthTokenStatus           = elasticacheLabelCacheClusterPendingModifiedValues + "auth_token_status"
+	elasticacheLabelCacheClusterPendingModifiedValuesCacheNodeType             = elasticacheLabelCacheClusterPendingModifiedValues + "cache_node_type"
+	elasticacheLabelCacheClusterPendingModifiedValuesEngineVersion             = elasticacheLabelCacheClusterPendingModifiedValues + "engine_version"
+	elasticacheLabelCacheClusterPendingModifiedValuesNumCacheNodes             = elasticacheLabelCacheClusterPendingModifiedValues + "num_cache_nodes"
+	elasticacheLabelCacheClusterPendingModifiedValuesTransitEncryptionEnabled  = elasticacheLabelCacheClusterPendingModifiedValues + "transit_encryption_enabled"
+	elasticacheLabelCacheClusterPendingModifiedValuesTransitEncryptionMode     = elasticacheLabelCacheClusterPendingModifiedValues + "transit_encryption_mode"
+	elasticacheLabelCacheClusterPendingModifiedValuesCacheNodeIDsToRemove      = elasticacheLabelCacheClusterPendingModifiedValues + "cache_node_ids_to_remove"
+
+	// security group IDs and statuses (slice - use with index).
+	elasticacheLabelCacheClusterSecurityGroupMembership       = elasticacheLabelCacheCluster + "security_group_membership_"
+	elasticacheLabelCacheClusterSecurityGroupMembershipID     = elasticacheLabelCacheClusterSecurityGroupMembership + "id"
+	elasticacheLabelCacheClusterSecurityGroupMembershipStatus = elasticacheLabelCacheClusterSecurityGroupMembership + "status"
+
+	// tags - create one label per tag key, with the format: elasticache_cache_cluster_tag_.
+	elasticacheLabelCacheClusterTag = elasticacheLabelCacheCluster + "tag_"
+
+	// node.
+	elasticacheLabelCacheClusterNode                     = elasticacheLabelCacheCluster + "node_"
+	elasticacheLabelCacheClusterNodeCreateTime           = elasticacheLabelCacheClusterNode + "create_time"
+	elasticacheLabelCacheClusterNodeID                   = elasticacheLabelCacheClusterNode + "id"
+	elasticacheLabelCacheClusterNodeStatus               = elasticacheLabelCacheClusterNode + "status"
+	elasticacheLabelCacheClusterNodeAZ                   = elasticacheLabelCacheClusterNode + "availability_zone"
+	elasticacheLabelCacheClusterNodeCustomerOutpostARN   = elasticacheLabelCacheClusterNode + "customer_outpost_arn"
+	elasticacheLabelCacheClusterNodeSourceCacheNodeID    = elasticacheLabelCacheClusterNode + "source_cache_node_id"
+	elasticacheLabelCacheClusterNodeParameterGroupStatus = elasticacheLabelCacheClusterNode + "parameter_group_status"
+
+	// endpoint.
+	elasticacheLabelCacheClusterNodeEndpoint        = elasticacheLabelCacheClusterNode + "endpoint_"
+	elasticacheLabelCacheClusterNodeEndpointAddress = elasticacheLabelCacheClusterNodeEndpoint + "address"
+	elasticacheLabelCacheClusterNodeEndpointPort    = elasticacheLabelCacheClusterNodeEndpoint + "port"
+
+	// serverless cache.
+	elasticacheLabelServerlessCache                       = elasticacheLabel + "serverless_cache_"
+	elasticacheLabelServerlessCacheARN                    = elasticacheLabelServerlessCache + "arn"
+	elasticacheLabelServerlessCacheName                   = elasticacheLabelServerlessCache + "name"
+	elasticacheLabelServerlessCacheCreateTime             = elasticacheLabelServerlessCache + "create_time"
+	elasticacheLabelServerlessCacheDescription            = elasticacheLabelServerlessCache + "description"
+	elasticacheLabelServerlessCacheEngine                 = elasticacheLabelServerlessCache + "engine"
+	elasticacheLabelServerlessCacheFullEngineVersion      = elasticacheLabelServerlessCache + "full_engine_version"
+	elasticacheLabelServerlessCacheMajorEngineVersion     = elasticacheLabelServerlessCache + "major_engine_version"
+	elasticacheLabelServerlessCacheStatus                 = elasticacheLabelServerlessCache + "status"
+	elasticacheLabelServerlessCacheKmsKeyID               = elasticacheLabelServerlessCache + "kms_key_id"
+	elasticacheLabelServerlessCacheUserGroupID            = elasticacheLabelServerlessCache + "user_group_id"
+	elasticacheLabelServerlessCacheDailySnapshotTime      = elasticacheLabelServerlessCache + "daily_snapshot_time"
+	elasticacheLabelServerlessCacheSnapshotRetentionLimit = elasticacheLabelServerlessCache + "snapshot_retention_limit"
+
+	// endpoint.
+	elasticacheLabelServerlessCacheEndpoint              = elasticacheLabelServerlessCache + "endpoint_"
+	elasticacheLabelServerlessCacheEndpointAddress       = elasticacheLabelServerlessCacheEndpoint + "address"
+	elasticacheLabelServerlessCacheEndpointPort          = elasticacheLabelServerlessCacheEndpoint + "port"
+	elasticacheLabelServerlessCacheReaderEndpointAddress = elasticacheLabelServerlessCacheEndpoint + "reader_address"
+	elasticacheLabelServerlessCacheReaderEndpointPort    = elasticacheLabelServerlessCacheEndpoint + "reader_port"
+
+	// security group IDs (slice - use with index).
+	elasticacheLabelServerlessCacheSecurityGroupID = elasticacheLabelServerlessCache + "security_group_id"
+
+	// subnet IDs (slice - use with index).
+	elasticacheLabelServerlessCacheSubnetID = elasticacheLabelServerlessCache + "subnet_id"
+
+	// cache usage limits.
+	elasticacheLabelServerlessCacheCacheUsageLimit                        = elasticacheLabelServerlessCache + "cache_usage_limit_"
+	elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorage        = elasticacheLabelServerlessCacheCacheUsageLimit + "data_storage"
+	elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorageMaximum = elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorage + "maximum"
+	elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorageMinimum = elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorage + "minimum"
+	elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorageUnit    = elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorage + "unit"
+	elasticacheLabelServerlessCacheCacheUsageLimitECPUPerSecond           = elasticacheLabelServerlessCacheCacheUsageLimit + "ecpu_per_second"
+	elasticacheLabelServerlessCacheCacheUsageLimitECPUPerSecondMaximum    = elasticacheLabelServerlessCacheCacheUsageLimitECPUPerSecond + "maximum"
+	elasticacheLabelServerlessCacheCacheUsageLimitECPUPerSecondMinimum    = elasticacheLabelServerlessCacheCacheUsageLimitECPUPerSecond + "minimum"
+
+	// tags - create one label per tag key, with the format: elasticache_serverless_cache_tag_.
+	elasticacheLabelServerlessCacheTag = elasticacheLabelServerlessCache + "tag_"
+)
+
+// DefaultElasticacheSDConfig is the default Elasticache SD configuration.
+// NOTE(review): Port 80 mirrors the defaults of the other AWS SD mechanisms,
+// but ElastiCache engines typically listen on 6379 (Redis/Valkey) or 11211
+// (Memcached) — confirm this default is intentional.
+var DefaultElasticacheSDConfig = ElasticacheSDConfig{
+	Port:               80,
+	RefreshInterval:    model.Duration(60 * time.Second),
+	RequestConcurrency: 10,
+	HTTPClientConfig:   config.DefaultHTTPClientConfig,
+}
+
+// init registers this SD mechanism with the discovery registry so it can be
+// referenced from Prometheus configuration files.
+func init() {
+	discovery.RegisterConfig(&ElasticacheSDConfig{})
+}
+
+// ElasticacheSDConfig is the configuration for Elasticache based service discovery.
+type ElasticacheSDConfig struct {
+	// Region is the AWS region to query; resolved via loadRegion if empty
+	// (see UnmarshalYAML).
+	Region string `yaml:"region"`
+	// Endpoint overrides the AWS API base endpoint (e.g. for testing or
+	// VPC endpoints); empty uses the SDK default.
+	Endpoint  string        `yaml:"endpoint"`
+	AccessKey string        `yaml:"access_key,omitempty"`
+	SecretKey config.Secret `yaml:"secret_key,omitempty"`
+	Profile   string        `yaml:"profile,omitempty"`
+	RoleARN   string        `yaml:"role_arn,omitempty"`
+	// Clusters optionally restricts discovery to the given cache ARNs;
+	// empty means discover everything in the region.
+	Clusters []string `yaml:"clusters,omitempty"`
+	// Port is attached as part of the __address__ label semantics of the SD.
+	Port            int            `yaml:"port"`
+	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
+
+	// RequestConcurrency controls the maximum number of concurrent Elasticache API requests.
+	RequestConcurrency int `yaml:"request_concurrency,omitempty"`
+
+	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+}
+
+// NewDiscovererMetrics implements discovery.Config.
+func (*ElasticacheSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &elasticacheMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
+// Name returns the name of the Elasticache Config.
+// This is the key under which the mechanism is registered and referenced
+// from Prometheus configuration.
+func (*ElasticacheSDConfig) Name() string { return "elasticache" }
+
+// NewDiscoverer returns a Discoverer for the Elasticache Config.
+func (c *ElasticacheSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+	return NewElasticacheDiscovery(c, opts)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface for the Elasticache Config.
+// It applies the package defaults, decodes the YAML on top of them, resolves
+// the AWS region via loadRegion when unset, and validates the HTTP client
+// configuration.
+func (c *ElasticacheSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
+	// Start from the defaults so omitted fields keep their documented values.
+	*c = DefaultElasticacheSDConfig
+	// The type alias drops the UnmarshalYAML method, preventing infinite recursion.
+	type plain ElasticacheSDConfig
+	err := unmarshal((*plain)(c))
+	if err != nil {
+		return err
+	}
+
+	// loadRegion may fall back to instance metadata; context.Background is
+	// used because YAML decoding has no request context.
+	c.Region, err = loadRegion(context.Background(), c.Region)
+	if err != nil {
+		return fmt.Errorf("could not determine AWS region: %w", err)
+	}
+
+	return c.HTTPClientConfig.Validate()
+}
+
+// elasticacheClient abstracts the subset of the AWS ElastiCache API used by
+// this discoverer, allowing tests to substitute a fake implementation.
+type elasticacheClient interface {
+	DescribeServerlessCaches(ctx context.Context, params *elasticache.DescribeServerlessCachesInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeServerlessCachesOutput, error)
+	DescribeCacheClusters(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error)
+	ListTagsForResource(ctx context.Context, params *elasticache.ListTagsForResourceInput, optFns ...func(*elasticache.Options)) (*elasticache.ListTagsForResourceOutput, error)
+}
+
+// ElasticacheDiscovery periodically performs Elasticache-SD requests.
+// It implements the Discoverer interface.
+type ElasticacheDiscovery struct {
+	*refresh.Discovery
+	logger *slog.Logger
+	cfg    *ElasticacheSDConfig
+	// elasticacheClient is lazily initialized on the first refresh; see
+	// initElasticacheClient.
+	elasticacheClient elasticacheClient
+}
+
+// NewElasticacheDiscovery returns a new ElasticacheDiscovery which periodically refreshes its targets.
+// It returns an error if opts.Metrics is not the *elasticacheMetrics produced
+// by ElasticacheSDConfig.NewDiscovererMetrics.
+func NewElasticacheDiscovery(conf *ElasticacheSDConfig, opts discovery.DiscovererOptions) (*ElasticacheDiscovery, error) {
+	m, ok := opts.Metrics.(*elasticacheMetrics)
+	if !ok {
+		return nil, errors.New("invalid discovery metrics type")
+	}
+
+	// Fall back to a no-op logger so the discoverer never needs nil checks.
+	if opts.Logger == nil {
+		opts.Logger = promslog.NewNopLogger()
+	}
+	d := &ElasticacheDiscovery{
+		logger: opts.Logger,
+		cfg:    conf,
+	}
+	// refresh.Discovery drives the periodic loop and calls d.refresh on
+	// every interval.
+	d.Discovery = refresh.NewDiscovery(
+		refresh.Options{
+			Logger:              opts.Logger,
+			Mech:                "elasticache",
+			Interval:            time.Duration(d.cfg.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
+		},
+	)
+	return d, nil
+}
+
+func (d *ElasticacheDiscovery) initElasticacheClient(ctx context.Context) error {
+ if d.elasticacheClient != nil {
+ return nil
+ }
+
+ if d.cfg.Region == "" {
+ return errors.New("region must be set for Elasticache service discovery")
+ }
+
+ // Build the HTTP client from the provided HTTPClientConfig.
+ client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "elasticache_sd")
+ if err != nil {
+ return err
+ }
+
+ // Build the AWS config with the provided region.
+ var configOptions []func(*awsConfig.LoadOptions) error
+ configOptions = append(configOptions, awsConfig.WithRegion(d.cfg.Region))
+ configOptions = append(configOptions, awsConfig.WithHTTPClient(client))
+
+ // Only set static credentials if both access key and secret key are provided
+ // Otherwise, let AWS SDK use its default credential chain
+ if d.cfg.AccessKey != "" && d.cfg.SecretKey != "" {
+ credProvider := credentials.NewStaticCredentialsProvider(d.cfg.AccessKey, string(d.cfg.SecretKey), "")
+ configOptions = append(configOptions, awsConfig.WithCredentialsProvider(credProvider))
+ }
+
+ if d.cfg.Profile != "" {
+ configOptions = append(configOptions, awsConfig.WithSharedConfigProfile(d.cfg.Profile))
+ }
+
+ cfg, err := awsConfig.LoadDefaultConfig(ctx, configOptions...)
+ if err != nil {
+ d.logger.Error("Failed to create AWS config", "error", err)
+ return fmt.Errorf("could not create aws config: %w", err)
+ }
+
+ // If the role ARN is set, assume the role to get credentials and set the credentials provider in the config.
+ if d.cfg.RoleARN != "" {
+ assumeProvider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), d.cfg.RoleARN)
+ cfg.Credentials = aws.NewCredentialsCache(assumeProvider)
+ }
+
+ d.elasticacheClient = elasticache.NewFromConfig(cfg, func(options *elasticache.Options) {
+ if d.cfg.Endpoint != "" {
+ options.BaseEndpoint = &d.cfg.Endpoint
+ }
+ options.HTTPClient = client
+ })
+
+ // Test credentials by making a simple API call
+ testCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ _, err = d.elasticacheClient.DescribeCacheClusters(testCtx, &elasticache.DescribeCacheClustersInput{})
+ if err != nil {
+ d.logger.Error("Failed to test Elasticache credentials", "error", err)
+ return fmt.Errorf("elasticache credential test failed: %w", err)
+ }
+
+ return nil
+}
+
+// describeServerlessCaches calls DescribeServerlessCaches API for the given cache IDs (or all caches if no IDs are provided) and returns the list of serverless caches.
+// Per-ID lookups run concurrently, bounded by cfg.RequestConcurrency.
+func (d *ElasticacheDiscovery) describeServerlessCaches(ctx context.Context, caches []string) ([]types.ServerlessCache, error) {
+	// mu guards serverlessCaches: the per-ID branch appends from multiple goroutines.
+	mu := &sync.Mutex{}
+	errg, ectx := errgroup.WithContext(ctx)
+	errg.SetLimit(d.cfg.RequestConcurrency)
+	var serverlessCaches []types.ServerlessCache
+	if len(caches) == 0 {
+		// No filter: page through every serverless cache in the region
+		// using NextToken pagination.
+		errg.Go(func() error {
+			var nextToken *string
+			for {
+				output, err := d.elasticacheClient.DescribeServerlessCaches(ectx, &elasticache.DescribeServerlessCachesInput{
+					MaxResults: aws.Int32(50),
+					NextToken:  nextToken,
+				})
+				if err != nil {
+					return fmt.Errorf("failed to describe serverless caches: %w", err)
+				}
+				mu.Lock()
+				serverlessCaches = append(serverlessCaches, output.ServerlessCaches...)
+				mu.Unlock()
+				if output.NextToken == nil {
+					break
+				}
+				nextToken = output.NextToken
+			}
+			return nil
+		})
+	} else {
+		// Explicit cache names: one lookup per name, no pagination needed
+		// since each name identifies a single cache.
+		for _, cacheID := range caches {
+			errg.Go(func() error {
+				output, err := d.elasticacheClient.DescribeServerlessCaches(ectx, &elasticache.DescribeServerlessCachesInput{
+					MaxResults:          aws.Int32(50),
+					NextToken:           nil,
+					ServerlessCacheName: aws.String(cacheID),
+				})
+				if err != nil {
+					return fmt.Errorf("failed to describe serverless cache %s: %w", cacheID, err)
+				}
+				mu.Lock()
+				serverlessCaches = append(serverlessCaches, output.ServerlessCaches...)
+				mu.Unlock()
+				return nil
+			})
+		}
+	}
+
+	// Wait returns the first error from any goroutine (if any).
+	return serverlessCaches, errg.Wait()
+}
+
+// describeCacheClusters calls DescribeCacheClusters API for the given cache cluster IDs (or all cache clusters if no IDs are provided) and returns the list of cache clusters.
+// Each query is issued once per value of ShowCacheClustersNotInReplicationGroups
+// (false and true), concurrently, bounded by cfg.RequestConcurrency.
+// NOTE(review): confirm the two ShowCacheClustersNotInReplicationGroups result
+// sets are disjoint and that a lookup by CacheClusterId does not fail with
+// NotFound for one of the two variants — otherwise targets could be duplicated
+// or a refresh could error spuriously.
+func (d *ElasticacheDiscovery) describeCacheClusters(ctx context.Context, caches []string) ([]types.CacheCluster, error) {
+	// mu guards cacheClusters: several goroutines append concurrently.
+	mu := &sync.Mutex{}
+	errg, ectx := errgroup.WithContext(ctx)
+	errg.SetLimit(d.cfg.RequestConcurrency)
+	showCacheClustersNotInReplicationGroupsBools := []bool{false, true}
+	var cacheClusters []types.CacheCluster
+	if len(caches) == 0 {
+		// No filter: page through all clusters (Marker pagination), once per
+		// replication-group-membership variant.
+		for _, showCacheClustersNotInReplicationGroupsBool := range showCacheClustersNotInReplicationGroupsBools {
+			errg.Go(func() error {
+				var nextToken *string
+				for {
+					output, err := d.elasticacheClient.DescribeCacheClusters(ectx, &elasticache.DescribeCacheClustersInput{
+						MaxRecords:                              aws.Int32(100),
+						Marker:                                  nextToken,
+						ShowCacheNodeInfo:                       aws.Bool(true),
+						ShowCacheClustersNotInReplicationGroups: aws.Bool(showCacheClustersNotInReplicationGroupsBool),
+					})
+					if err != nil {
+						return fmt.Errorf("failed to describe cache clusters: %w", err)
+					}
+					mu.Lock()
+					cacheClusters = append(cacheClusters, output.CacheClusters...)
+					mu.Unlock()
+					if output.Marker == nil {
+						break
+					}
+					nextToken = output.Marker
+				}
+				return nil
+			})
+		}
+	} else {
+		// Explicit cluster IDs: one request per (ID, variant) pair.
+		for _, cacheID := range caches {
+			for _, showCacheClustersNotInReplicationGroupsBool := range showCacheClustersNotInReplicationGroupsBools {
+				errg.Go(func() error {
+					output, err := d.elasticacheClient.DescribeCacheClusters(ectx, &elasticache.DescribeCacheClustersInput{
+						MaxRecords:                              aws.Int32(100),
+						Marker:                                  nil,
+						ShowCacheNodeInfo:                       aws.Bool(true),
+						ShowCacheClustersNotInReplicationGroups: aws.Bool(showCacheClustersNotInReplicationGroupsBool),
+						CacheClusterId:                          aws.String(cacheID),
+					})
+					if err != nil {
+						return fmt.Errorf("failed to describe cache cluster %s: %w", cacheID, err)
+					}
+					mu.Lock()
+					cacheClusters = append(cacheClusters, output.CacheClusters...)
+					mu.Unlock()
+					return nil
+				})
+			}
+		}
+	}
+
+	// Wait returns the first error from any goroutine (if any).
+	return cacheClusters, errg.Wait()
+}
+
+// listTagsForResource calls ListTagsForResource API for the given resource ARNs and returns a map of resource ARN to list of tags.
+// One request is issued per ARN, concurrently, bounded by cfg.RequestConcurrency.
+func (d *ElasticacheDiscovery) listTagsForResource(ctx context.Context, resourceARNs []string) (map[string][]types.Tag, error) {
+	// mu guards tagsByResourceARN against concurrent map writes.
+	mu := &sync.Mutex{}
+	errg, ectx := errgroup.WithContext(ctx)
+	errg.SetLimit(d.cfg.RequestConcurrency)
+	tagsByResourceARN := make(map[string][]types.Tag)
+	for _, resourceARN := range resourceARNs {
+		errg.Go(func() error {
+			output, err := d.elasticacheClient.ListTagsForResource(ectx, &elasticache.ListTagsForResourceInput{
+				ResourceName: aws.String(resourceARN),
+			})
+			if err != nil {
+				return fmt.Errorf("failed to list tags for resource %s: %w", resourceARN, err)
+			}
+			mu.Lock()
+			tagsByResourceARN[resourceARN] = output.TagList
+			mu.Unlock()
+			return nil
+		})
+	}
+	// Wait returns the first error from any goroutine (if any).
+	return tagsByResourceARN, errg.Wait()
+}
+
+// refresh performs one discovery cycle: it describes every configured (or all)
+// serverless cache and cache cluster, fetches their tags, and builds a single
+// target group keyed by region. The previous version described each resource
+// set twice per refresh — once to collect ARNs for tag lookup and once to
+// build targets — doubling the AWS API call volume and allowing the two passes
+// to observe different resource sets; the results are now fetched once and
+// reused for both purposes.
+func (d *ElasticacheDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+	if err := d.initElasticacheClient(ctx); err != nil {
+		return nil, err
+	}
+
+	serverlessCacheIDs, cacheClusterIDs := splitCacheDeploymentOptions(d.cfg.Clusters)
+
+	// Describe both deployment options concurrently. Each goroutine writes a
+	// distinct variable, and errg.Wait provides the happens-before edge, so no
+	// mutex is needed.
+	var (
+		serverlessCaches []types.ServerlessCache
+		cacheClusters    []types.CacheCluster
+	)
+	errg, ectx := errgroup.WithContext(ctx)
+	errg.Go(func() error {
+		caches, err := d.describeServerlessCaches(ectx, serverlessCacheIDs)
+		if err != nil {
+			return fmt.Errorf("failed to describe serverless caches: %w", err)
+		}
+		serverlessCaches = caches
+		return nil
+	})
+	errg.Go(func() error {
+		clusters, err := d.describeCacheClusters(ectx, cacheClusterIDs)
+		if err != nil {
+			return fmt.Errorf("failed to describe cache clusters: %w", err)
+		}
+		cacheClusters = clusters
+		return nil
+	})
+	if err := errg.Wait(); err != nil {
+		return nil, err
+	}
+
+	// Collect every discovered resource ARN so all tags can be fetched in one pass.
+	arns := make([]string, 0, len(serverlessCaches)+len(cacheClusters))
+	for _, cache := range serverlessCaches {
+		arns = append(arns, *cache.ARN)
+	}
+	for _, cluster := range cacheClusters {
+		arns = append(arns, *cluster.ARN)
+	}
+
+	tagsByResourceARN, err := d.listTagsForResource(ctx, arns)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list tags for resources: %w", err)
+	}
+
+	// All targets share a single group sourced from the region.
+	tg := &targetgroup.Group{
+		Source: d.cfg.Region,
+	}
+	for _, cache := range serverlessCaches {
+		addServerlessCacheTargets(tg, &cache, tagsByResourceARN[*cache.ARN])
+	}
+	for _, cluster := range cacheClusters {
+		addCacheClusterTargets(tg, &cluster, tagsByResourceARN[*cluster.ARN])
+	}
+
+	return []*targetgroup.Group{tg}, nil
+}
+
+// splitCacheDeploymentOptions takes a list of cache ARNs and splits them into
+// serverless cache names and cache cluster IDs based on the ARN resource type:
+//
+//	serverless:  arn:aws:elasticache:<region>:<account>:serverlesscache:<name>
+//	clustered:   arn:aws:elasticache:<region>:<account>:replicationgroup:<id>
+//
+// ARNs with an unrecognized resource type or too few components are ignored.
+func splitCacheDeploymentOptions(caches []string) (serverlessCacheIDs, cacheClusterIDs []string) {
+	for _, cacheARN := range caches {
+		parts := strings.Split(cacheARN, ":")
+		// A full ARN has 7 colon-separated components; the previous check of
+		// len(parts) < 6 let a 6-part ARN through and panicked on parts[6].
+		if len(parts) < 7 {
+			continue
+		}
+		switch parts[5] {
+		case "serverlesscache":
+			serverlessCacheIDs = append(serverlessCacheIDs, parts[6])
+		case "replicationgroup":
+			cacheClusterIDs = append(cacheClusterIDs, parts[6])
+		}
+	}
+	return serverlessCacheIDs, cacheClusterIDs
+}
+
+// addServerlessCacheTargets adds targets for a serverless cache to the target group.
+// One target is appended per cache; __address__ is set from the primary
+// endpoint when available, so caches without a resolvable endpoint yield a
+// target with metadata labels only.
+// NOTE(review): ARN, ServerlessCacheName, Status, Engine, FullEngineVersion
+// and MajorEngineVersion are dereferenced unconditionally — confirm the API
+// guarantees these fields are always non-nil.
+func addServerlessCacheTargets(tg *targetgroup.Group, cache *types.ServerlessCache, tags []types.Tag) {
+	labels := model.LabelSet{
+		elasticacheLabelDeploymentOption:                  model.LabelValue("serverless"),
+		elasticacheLabelServerlessCacheARN:                model.LabelValue(*cache.ARN),
+		elasticacheLabelServerlessCacheName:               model.LabelValue(*cache.ServerlessCacheName),
+		elasticacheLabelServerlessCacheStatus:             model.LabelValue(*cache.Status),
+		elasticacheLabelServerlessCacheEngine:             model.LabelValue(*cache.Engine),
+		elasticacheLabelServerlessCacheFullEngineVersion:  model.LabelValue(*cache.FullEngineVersion),
+		elasticacheLabelServerlessCacheMajorEngineVersion: model.LabelValue(*cache.MajorEngineVersion),
+	}
+
+	// Optional scalar fields: only emit a label when the API returned a value.
+	if cache.Description != nil {
+		labels[elasticacheLabelServerlessCacheDescription] = model.LabelValue(*cache.Description)
+	}
+
+	if cache.CreateTime != nil {
+		labels[elasticacheLabelServerlessCacheCreateTime] = model.LabelValue(cache.CreateTime.Format(time.RFC3339))
+	}
+
+	if cache.KmsKeyId != nil {
+		labels[elasticacheLabelServerlessCacheKmsKeyID] = model.LabelValue(*cache.KmsKeyId)
+	}
+
+	if cache.UserGroupId != nil {
+		labels[elasticacheLabelServerlessCacheUserGroupID] = model.LabelValue(*cache.UserGroupId)
+	}
+
+	if cache.DailySnapshotTime != nil {
+		labels[elasticacheLabelServerlessCacheDailySnapshotTime] = model.LabelValue(*cache.DailySnapshotTime)
+	}
+
+	if cache.SnapshotRetentionLimit != nil {
+		labels[elasticacheLabelServerlessCacheSnapshotRetentionLimit] = model.LabelValue(strconv.Itoa(int(*cache.SnapshotRetentionLimit)))
+	}
+
+	if cache.Endpoint != nil {
+		if cache.Endpoint.Address != nil {
+			labels[elasticacheLabelServerlessCacheEndpointAddress] = model.LabelValue(*cache.Endpoint.Address)
+		}
+		if cache.Endpoint.Port != nil {
+			labels[elasticacheLabelServerlessCacheEndpointPort] = model.LabelValue(strconv.Itoa(int(*cache.Endpoint.Port)))
+		}
+	}
+
+	if cache.ReaderEndpoint != nil {
+		if cache.ReaderEndpoint.Address != nil {
+			labels[elasticacheLabelServerlessCacheReaderEndpointAddress] = model.LabelValue(*cache.ReaderEndpoint.Address)
+		}
+		if cache.ReaderEndpoint.Port != nil {
+			labels[elasticacheLabelServerlessCacheReaderEndpointPort] = model.LabelValue(strconv.Itoa(int(*cache.ReaderEndpoint.Port)))
+		}
+	}
+
+	// Slice-valued fields become one indexed label per element.
+	for i, sgID := range cache.SecurityGroupIds {
+		labels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelServerlessCacheSecurityGroupID, i))] = model.LabelValue(sgID)
+	}
+
+	for i, subnetID := range cache.SubnetIds {
+		labels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelServerlessCacheSubnetID, i))] = model.LabelValue(subnetID)
+	}
+
+	if cache.CacheUsageLimits != nil {
+		if cache.CacheUsageLimits.DataStorage != nil {
+			if cache.CacheUsageLimits.DataStorage.Maximum != nil {
+				labels[elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorageMaximum] = model.LabelValue(strconv.Itoa(int(*cache.CacheUsageLimits.DataStorage.Maximum)))
+			}
+			if cache.CacheUsageLimits.DataStorage.Minimum != nil {
+				labels[elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorageMinimum] = model.LabelValue(strconv.Itoa(int(*cache.CacheUsageLimits.DataStorage.Minimum)))
+			}
+			labels[elasticacheLabelServerlessCacheCacheUsageLimitCacheDataStorageUnit] = model.LabelValue(cache.CacheUsageLimits.DataStorage.Unit)
+		}
+		if cache.CacheUsageLimits.ECPUPerSecond != nil {
+			if cache.CacheUsageLimits.ECPUPerSecond.Maximum != nil {
+				labels[elasticacheLabelServerlessCacheCacheUsageLimitECPUPerSecondMaximum] = model.LabelValue(strconv.Itoa(int(*cache.CacheUsageLimits.ECPUPerSecond.Maximum)))
+			}
+			if cache.CacheUsageLimits.ECPUPerSecond.Minimum != nil {
+				labels[elasticacheLabelServerlessCacheCacheUsageLimitECPUPerSecondMinimum] = model.LabelValue(strconv.Itoa(int(*cache.CacheUsageLimits.ECPUPerSecond.Minimum)))
+			}
+		}
+	}
+
+	// One label per tag key, sanitized to a valid label name.
+	for _, tag := range tags {
+		if tag.Key != nil && tag.Value != nil {
+			labels[model.LabelName(elasticacheLabelServerlessCacheTag+strutil.SanitizeLabelName(*tag.Key))] = model.LabelValue(*tag.Value)
+		}
+	}
+
+	// Set the address label using the endpoint
+	if cache.Endpoint != nil && cache.Endpoint.Address != nil && cache.Endpoint.Port != nil {
+		labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(*cache.Endpoint.Address, strconv.Itoa(int(*cache.Endpoint.Port))))
+	}
+
+	tg.Targets = append(tg.Targets, labels)
+}
+
+// addCacheClusterTargets adds targets for a cache cluster to the target group.
+// Creates one target per cache node for individual scraping.
+func addCacheClusterTargets(tg *targetgroup.Group, cluster *types.CacheCluster, tags []types.Tag) {
+ // Build common labels that apply to all nodes in this cluster
+ commonLabels := model.LabelSet{
+ elasticacheLabelDeploymentOption: model.LabelValue("node"),
+ elasticacheLabelCacheClusterARN: model.LabelValue(*cluster.ARN),
+ elasticacheLabelCacheClusterID: model.LabelValue(*cluster.CacheClusterId),
+ elasticacheLabelCacheClusterStatus: model.LabelValue(*cluster.CacheClusterStatus),
+ }
+
+ if cluster.AtRestEncryptionEnabled != nil {
+ commonLabels[elasticacheLabelCacheClusterAtRestEncryptionEnabled] = model.LabelValue(strconv.FormatBool(*cluster.AtRestEncryptionEnabled))
+ }
+
+ if cluster.AuthTokenEnabled != nil {
+ commonLabels[elasticacheLabelCacheClusterAuthTokenEnabled] = model.LabelValue(strconv.FormatBool(*cluster.AuthTokenEnabled))
+ }
+
+ if cluster.AuthTokenLastModifiedDate != nil {
+ commonLabels[elasticacheLabelCacheClusterAuthTokenLastModified] = model.LabelValue(cluster.AuthTokenLastModifiedDate.Format(time.RFC3339))
+ }
+
+ if cluster.AutoMinorVersionUpgrade != nil {
+ commonLabels[elasticacheLabelCacheClusterAutoMinorVersionUpgrade] = model.LabelValue(strconv.FormatBool(*cluster.AutoMinorVersionUpgrade))
+ }
+
+ if cluster.CacheClusterCreateTime != nil {
+ commonLabels[elasticacheLabelCacheClusterCreateTime] = model.LabelValue(cluster.CacheClusterCreateTime.Format(time.RFC3339))
+ }
+
+ if cluster.CacheNodeType != nil {
+ commonLabels[elasticacheLabelCacheClusterNodeType] = model.LabelValue(*cluster.CacheNodeType)
+ }
+
+ if cluster.CacheParameterGroup != nil && cluster.CacheParameterGroup.CacheParameterGroupName != nil {
+ commonLabels[elasticacheLabelCacheClusterParameterGroup] = model.LabelValue(*cluster.CacheParameterGroup.CacheParameterGroupName)
+ }
+
+ if cluster.CacheSubnetGroupName != nil {
+ commonLabels[elasticacheLabelCacheClusterSubnetGroupName] = model.LabelValue(*cluster.CacheSubnetGroupName)
+ }
+
+ if cluster.ClientDownloadLandingPage != nil {
+ commonLabels[elasticacheLabelCacheClusterClientDownloadLandingPage] = model.LabelValue(*cluster.ClientDownloadLandingPage)
+ }
+
+ if cluster.ConfigurationEndpoint != nil {
+ if cluster.ConfigurationEndpoint.Address != nil {
+ commonLabels[elasticacheLabelCacheClusterConfigurationEndpointAddress] = model.LabelValue(*cluster.ConfigurationEndpoint.Address)
+ }
+ if cluster.ConfigurationEndpoint.Port != nil {
+ commonLabels[elasticacheLabelCacheClusterConfigurationEndpointPort] = model.LabelValue(strconv.Itoa(int(*cluster.ConfigurationEndpoint.Port)))
+ }
+ }
+
+ if cluster.Engine != nil {
+ commonLabels[elasticacheLabelCacheClusterEngine] = model.LabelValue(*cluster.Engine)
+ }
+
+ if cluster.EngineVersion != nil {
+ commonLabels[elasticacheLabelCacheClusterEngineVersion] = model.LabelValue(*cluster.EngineVersion)
+ }
+
+ if len(cluster.IpDiscovery) > 0 {
+ commonLabels[elasticacheLabelCacheClusterIPDiscovery] = model.LabelValue(cluster.IpDiscovery)
+ }
+
+ if len(cluster.NetworkType) > 0 {
+ commonLabels[elasticacheLabelCacheClusterNetworkType] = model.LabelValue(cluster.NetworkType)
+ }
+
+ if cluster.NotificationConfiguration != nil {
+ if cluster.NotificationConfiguration.TopicArn != nil {
+ commonLabels[elasticacheLabelCacheClusterNotificationTopicARN] = model.LabelValue(*cluster.NotificationConfiguration.TopicArn)
+ }
+ if cluster.NotificationConfiguration.TopicStatus != nil {
+ commonLabels[elasticacheLabelCacheClusterNotificationTopicStatus] = model.LabelValue(*cluster.NotificationConfiguration.TopicStatus)
+ }
+ }
+
+ if cluster.NumCacheNodes != nil {
+ commonLabels[elasticacheLabelCacheClusterNumCacheNodes] = model.LabelValue(strconv.Itoa(int(*cluster.NumCacheNodes)))
+ }
+
+ if cluster.PreferredAvailabilityZone != nil {
+ commonLabels[elasticacheLabelCacheClusterPreferredAvailabilityZone] = model.LabelValue(*cluster.PreferredAvailabilityZone)
+ }
+
+ if cluster.PreferredMaintenanceWindow != nil {
+ commonLabels[elasticacheLabelCacheClusterPreferredMaintenanceWindow] = model.LabelValue(*cluster.PreferredMaintenanceWindow)
+ }
+
+ if cluster.PreferredOutpostArn != nil {
+ commonLabels[elasticacheLabelCacheClusterPreferredOutpostARN] = model.LabelValue(*cluster.PreferredOutpostArn)
+ }
+
+ if cluster.ReplicationGroupId != nil {
+ commonLabels[elasticacheLabelCacheClusterReplicationGroupID] = model.LabelValue(*cluster.ReplicationGroupId)
+ }
+
+ if cluster.ReplicationGroupLogDeliveryEnabled != nil {
+ commonLabels[elasticacheLabelCacheClusterReplicationGroupLogDeliveryEnabled] = model.LabelValue(strconv.FormatBool(*cluster.ReplicationGroupLogDeliveryEnabled))
+ }
+
+ if cluster.SnapshotRetentionLimit != nil {
+ commonLabels[elasticacheLabelCacheClusterSnapshotRetentionLimit] = model.LabelValue(strconv.Itoa(int(*cluster.SnapshotRetentionLimit)))
+ }
+
+ if cluster.SnapshotWindow != nil {
+ commonLabels[elasticacheLabelCacheClusterSnapshotWindow] = model.LabelValue(*cluster.SnapshotWindow)
+ }
+
+ if cluster.TransitEncryptionEnabled != nil {
+ commonLabels[elasticacheLabelCacheClusterTransitEncryptionEnabled] = model.LabelValue(strconv.FormatBool(*cluster.TransitEncryptionEnabled))
+ }
+
+ if len(cluster.TransitEncryptionMode) > 0 {
+ commonLabels[elasticacheLabelCacheClusterTransitEncryptionMode] = model.LabelValue(cluster.TransitEncryptionMode)
+ }
+
+ // Log delivery configurations (slice)
+ for i, logDelivery := range cluster.LogDeliveryConfigurations {
+ if len(logDelivery.DestinationType) > 0 {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterLogDeliveryConfigurationDestinationType, i))] = model.LabelValue(logDelivery.DestinationType)
+ }
+ if len(logDelivery.LogFormat) > 0 {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterLogDeliveryConfigurationLogFormat, i))] = model.LabelValue(logDelivery.LogFormat)
+ }
+ if len(logDelivery.LogType) > 0 {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterLogDeliveryConfigurationLogType, i))] = model.LabelValue(logDelivery.LogType)
+ }
+ if len(logDelivery.Status) > 0 {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterLogDeliveryConfigurationStatus, i))] = model.LabelValue(logDelivery.Status)
+ }
+ if logDelivery.Message != nil {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterLogDeliveryConfigurationMessage, i))] = model.LabelValue(*logDelivery.Message)
+ }
+ if logDelivery.DestinationDetails != nil {
+ if logDelivery.DestinationDetails.CloudWatchLogsDetails != nil && logDelivery.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterLogDeliveryConfigurationLogGroup, i))] = model.LabelValue(*logDelivery.DestinationDetails.CloudWatchLogsDetails.LogGroup)
+ }
+ if logDelivery.DestinationDetails.KinesisFirehoseDetails != nil && logDelivery.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterLogDeliveryConfigurationDeliveryStream, i))] = model.LabelValue(*logDelivery.DestinationDetails.KinesisFirehoseDetails.DeliveryStream)
+ }
+ }
+ }
+
+ // Pending modified values
+ if cluster.PendingModifiedValues != nil {
+ if len(cluster.PendingModifiedValues.AuthTokenStatus) > 0 {
+ commonLabels[elasticacheLabelCacheClusterPendingModifiedValuesAuthTokenStatus] = model.LabelValue(cluster.PendingModifiedValues.AuthTokenStatus)
+ }
+ if cluster.PendingModifiedValues.CacheNodeType != nil {
+ commonLabels[elasticacheLabelCacheClusterPendingModifiedValuesCacheNodeType] = model.LabelValue(*cluster.PendingModifiedValues.CacheNodeType)
+ }
+ if cluster.PendingModifiedValues.EngineVersion != nil {
+ commonLabels[elasticacheLabelCacheClusterPendingModifiedValuesEngineVersion] = model.LabelValue(*cluster.PendingModifiedValues.EngineVersion)
+ }
+ if cluster.PendingModifiedValues.NumCacheNodes != nil {
+ commonLabels[elasticacheLabelCacheClusterPendingModifiedValuesNumCacheNodes] = model.LabelValue(strconv.Itoa(int(*cluster.PendingModifiedValues.NumCacheNodes)))
+ }
+ if cluster.PendingModifiedValues.TransitEncryptionEnabled != nil {
+ commonLabels[elasticacheLabelCacheClusterPendingModifiedValuesTransitEncryptionEnabled] = model.LabelValue(strconv.FormatBool(*cluster.PendingModifiedValues.TransitEncryptionEnabled))
+ }
+ if len(cluster.PendingModifiedValues.TransitEncryptionMode) > 0 {
+ commonLabels[elasticacheLabelCacheClusterPendingModifiedValuesTransitEncryptionMode] = model.LabelValue(cluster.PendingModifiedValues.TransitEncryptionMode)
+ }
+ if len(cluster.PendingModifiedValues.CacheNodeIdsToRemove) > 0 {
+ commonLabels[elasticacheLabelCacheClusterPendingModifiedValuesCacheNodeIDsToRemove] = model.LabelValue(strings.Join(cluster.PendingModifiedValues.CacheNodeIdsToRemove, ","))
+ }
+ }
+
+ // Security group membership (slice)
+ for i, sg := range cluster.SecurityGroups {
+ if sg.SecurityGroupId != nil {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterSecurityGroupMembershipID, i))] = model.LabelValue(*sg.SecurityGroupId)
+ }
+ if sg.Status != nil {
+ commonLabels[model.LabelName(fmt.Sprintf("%s_%d", elasticacheLabelCacheClusterSecurityGroupMembershipStatus, i))] = model.LabelValue(*sg.Status)
+ }
+ }
+
+ // Tags
+ for _, tag := range tags {
+ if tag.Key != nil && tag.Value != nil {
+ commonLabels[model.LabelName(elasticacheLabelCacheClusterTag+strutil.SanitizeLabelName(*tag.Key))] = model.LabelValue(*tag.Value)
+ }
+ }
+
+ // Create one target per cache node
+ for _, node := range cluster.CacheNodes {
+ // Clone common labels for this node
+ labels := make(model.LabelSet, len(commonLabels))
+ maps.Copy(labels, commonLabels)
+
+ // Add node-specific labels
+ if node.CacheNodeId != nil {
+ labels[elasticacheLabelCacheClusterNodeID] = model.LabelValue(*node.CacheNodeId)
+ }
+ if node.CacheNodeStatus != nil {
+ labels[elasticacheLabelCacheClusterNodeStatus] = model.LabelValue(*node.CacheNodeStatus)
+ }
+ if node.CacheNodeCreateTime != nil {
+ labels[elasticacheLabelCacheClusterNodeCreateTime] = model.LabelValue(node.CacheNodeCreateTime.Format(time.RFC3339))
+ }
+ if node.CustomerAvailabilityZone != nil {
+ labels[elasticacheLabelCacheClusterNodeAZ] = model.LabelValue(*node.CustomerAvailabilityZone)
+ }
+ if node.CustomerOutpostArn != nil {
+ labels[elasticacheLabelCacheClusterNodeCustomerOutpostARN] = model.LabelValue(*node.CustomerOutpostArn)
+ }
+ if node.SourceCacheNodeId != nil {
+ labels[elasticacheLabelCacheClusterNodeSourceCacheNodeID] = model.LabelValue(*node.SourceCacheNodeId)
+ }
+ if node.ParameterGroupStatus != nil {
+ labels[elasticacheLabelCacheClusterNodeParameterGroupStatus] = model.LabelValue(*node.ParameterGroupStatus)
+ }
+ if node.Endpoint != nil {
+ if node.Endpoint.Address != nil {
+ labels[elasticacheLabelCacheClusterNodeEndpointAddress] = model.LabelValue(*node.Endpoint.Address)
+ }
+ if node.Endpoint.Port != nil {
+ labels[elasticacheLabelCacheClusterNodeEndpointPort] = model.LabelValue(strconv.Itoa(int(*node.Endpoint.Port)))
+ }
+
+ // Set the address label to this node's endpoint
+ if node.Endpoint.Address != nil && node.Endpoint.Port != nil {
+ labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(*node.Endpoint.Address, strconv.Itoa(int(*node.Endpoint.Port))))
+ }
+ }
+
+ tg.Targets = append(tg.Targets, labels)
+ }
+}
diff --git a/discovery/aws/elasticache_test.go b/discovery/aws/elasticache_test.go
new file mode 100644
index 0000000000..4611f33059
--- /dev/null
+++ b/discovery/aws/elasticache_test.go
@@ -0,0 +1,615 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/elasticache"
+ "github.com/aws/aws-sdk-go-v2/service/elasticache/types"
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+// elasticacheDataStore holds the in-memory fixture data (region, caches, clusters, tags) served by the mock ElastiCache client in these tests.
+type elasticacheDataStore struct {
+ region string
+ serverlessCaches []types.ServerlessCache
+ cacheClusters []types.CacheCluster
+ tags map[string][]types.Tag // keyed by cache ARN
+}
+
+func TestElasticacheDiscoveryDescribeServerlessCaches(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecData *elasticacheDataStore
+ cacheNames []string
+ expectedCount int
+ }{
+ {
+ name: "MultipleCaches",
+ ecData: &elasticacheDataStore{
+ region: "us-west-2",
+ serverlessCaches: []types.ServerlessCache{
+ {
+ ServerlessCacheName: strptr("test-cache"),
+ ARN: strptr("arn:aws:elasticache:us-west-2:123456789012:serverlesscache:test-cache"),
+ Status: strptr("available"),
+ Engine: strptr("redis"),
+ FullEngineVersion: strptr("7.1"),
+ CreateTime: aws.Time(time.Now()),
+ Endpoint: &types.Endpoint{
+ Address: strptr("test-cache.serverless.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ },
+ {
+ ServerlessCacheName: strptr("prod-cache"),
+ ARN: strptr("arn:aws:elasticache:us-west-2:123456789012:serverlesscache:prod-cache"),
+ Status: strptr("available"),
+ Engine: strptr("valkey"),
+ FullEngineVersion: strptr("7.2"),
+ CreateTime: aws.Time(time.Now()),
+ Endpoint: &types.Endpoint{
+ Address: strptr("prod-cache.serverless.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ },
+ },
+ },
+ cacheNames: []string{},
+ expectedCount: 2,
+ },
+ {
+ name: "SingleCache",
+ ecData: &elasticacheDataStore{
+ region: "us-east-1",
+ serverlessCaches: []types.ServerlessCache{
+ {
+ ServerlessCacheName: strptr("single-cache"),
+ ARN: strptr("arn:aws:elasticache:us-east-1:123456789012:serverlesscache:single-cache"),
+ Status: strptr("available"),
+ Engine: strptr("redis"),
+ FullEngineVersion: strptr("7.1"),
+ CreateTime: aws.Time(time.Now()),
+ },
+ },
+ },
+ cacheNames: []string{"single-cache"},
+ expectedCount: 1,
+ },
+ {
+ name: "NoCaches",
+ ecData: &elasticacheDataStore{
+ region: "us-east-1",
+ serverlessCaches: []types.ServerlessCache{},
+ },
+ cacheNames: []string{},
+ expectedCount: 0,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockElasticacheClient(tt.ecData)
+
+ d := &ElasticacheDiscovery{
+ elasticacheClient: client,
+ cfg: &ElasticacheSDConfig{
+ Region: tt.ecData.region,
+ RequestConcurrency: 10,
+ },
+ }
+
+ caches, err := d.describeServerlessCaches(ctx, tt.cacheNames)
+ require.NoError(t, err)
+ require.Len(t, caches, tt.expectedCount)
+ })
+ }
+}
+
+func TestElasticacheDiscoveryDescribeCacheClusters(t *testing.T) {
+ ctx := context.Background()
+
+ for _, tt := range []struct {
+ name string
+ ecData *elasticacheDataStore
+ clusterIDs []string
+ expectedCount int
+ skipTest bool
+ }{
+ {
+ name: "MockValidation",
+ ecData: &elasticacheDataStore{
+ region: "us-west-2",
+ cacheClusters: []types.CacheCluster{
+ {
+ CacheClusterId: strptr("test-cluster-001"),
+ ARN: strptr("arn:aws:elasticache:us-west-2:123456789012:cluster:test-cluster-001"),
+ CacheClusterStatus: strptr("available"),
+ Engine: strptr("redis"),
+ EngineVersion: strptr("7.1"),
+ CacheNodeType: strptr("cache.t3.micro"),
+ NumCacheNodes: aws.Int32(1),
+ ConfigurationEndpoint: &types.Endpoint{
+ Address: strptr("test-cluster.abc123.cfg.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ },
+ },
+ },
+ clusterIDs: []string{},
+ expectedCount: 1,
+ skipTest: false,
+ },
+ {
+ name: "NoClusters",
+ ecData: &elasticacheDataStore{
+ region: "us-east-1",
+ cacheClusters: []types.CacheCluster{},
+ },
+ clusterIDs: []string{},
+ expectedCount: 0,
+ skipTest: false,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ if tt.skipTest {
+ t.Skip("Skipping complex test with concurrency")
+ }
+ client := newMockElasticacheClient(tt.ecData)
+
+ // Verify mock returns expected data
+ output, err := client.DescribeCacheClusters(ctx, &elasticache.DescribeCacheClustersInput{})
+ require.NoError(t, err)
+ require.Len(t, output.CacheClusters, tt.expectedCount)
+ })
+ }
+}
+
+func TestAddServerlessCacheTargets(t *testing.T) {
+ testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ cache *types.ServerlessCache
+ tags []types.Tag
+ expectedLabels model.LabelSet
+ }{
+ {
+ name: "ServerlessCacheWithEndpoint",
+ cache: &types.ServerlessCache{
+ ServerlessCacheName: strptr("my-cache"),
+ ARN: strptr("arn:aws:elasticache:us-east-1:123456789012:serverlesscache:my-cache"),
+ Status: strptr("available"),
+ Engine: strptr("redis"),
+ FullEngineVersion: strptr("7.1"),
+ MajorEngineVersion: strptr("7"),
+ CreateTime: aws.Time(testTime),
+ Endpoint: &types.Endpoint{
+ Address: strptr("my-cache.serverless.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ ReaderEndpoint: &types.Endpoint{
+ Address: strptr("my-cache-ro.serverless.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ SecurityGroupIds: []string{"sg-12345"},
+ SubnetIds: []string{"subnet-abcdef"},
+ CacheUsageLimits: &types.CacheUsageLimits{
+ DataStorage: &types.DataStorage{
+ Maximum: aws.Int32(10),
+ Minimum: aws.Int32(1),
+ Unit: types.DataStorageUnitGb,
+ },
+ ECPUPerSecond: &types.ECPUPerSecond{
+ Maximum: aws.Int32(5000),
+ Minimum: aws.Int32(1000),
+ },
+ },
+ },
+ tags: []types.Tag{
+ {Key: strptr("Environment"), Value: strptr("test")},
+ },
+ expectedLabels: model.LabelSet{
+ "__meta_elasticache_deployment_option": "serverless",
+ "__meta_elasticache_serverless_cache_arn": "arn:aws:elasticache:us-east-1:123456789012:serverlesscache:my-cache",
+ "__meta_elasticache_serverless_cache_name": "my-cache",
+ "__meta_elasticache_serverless_cache_status": "available",
+ "__meta_elasticache_serverless_cache_engine": "redis",
+ "__meta_elasticache_serverless_cache_full_engine_version": "7.1",
+ "__meta_elasticache_serverless_cache_major_engine_version": "7",
+ "__meta_elasticache_serverless_cache_create_time": "2024-01-01T00:00:00Z",
+ "__meta_elasticache_serverless_cache_endpoint_address": "my-cache.serverless.use1.cache.amazonaws.com",
+ "__meta_elasticache_serverless_cache_endpoint_port": "6379",
+
+ "__meta_elasticache_serverless_cache_security_group_id_0": "sg-12345",
+ "__meta_elasticache_serverless_cache_subnet_id_0": "subnet-abcdef",
+
+ "__address__": "my-cache.serverless.use1.cache.amazonaws.com:6379",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tg := &targetgroup.Group{
+ Source: "test",
+ }
+
+ addServerlessCacheTargets(tg, tt.cache, tt.tags)
+
+ require.Len(t, tg.Targets, 1)
+ labels := tg.Targets[0]
+
+ // Check that all expected labels are present with correct values
+ for k, v := range tt.expectedLabels {
+ actualValue, exists := labels[k]
+ require.True(t, exists, "label %s should exist", k)
+ require.Equal(t, v, actualValue, "label %s mismatch", k)
+ }
+ })
+ }
+}
+
+func TestAddCacheClusterTargets(t *testing.T) {
+ testTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ cluster *types.CacheCluster
+ tags []types.Tag
+ expectedTargetCount int
+ expectedLabels []model.LabelSet // One per node
+ }{
+ {
+ name: "CacheClusterWithMultipleNodes",
+ cluster: &types.CacheCluster{
+ CacheClusterId: strptr("my-cluster-001"),
+ ARN: strptr("arn:aws:elasticache:us-east-1:123456789012:cluster:my-cluster-001"),
+ CacheClusterStatus: strptr("available"),
+ Engine: strptr("redis"),
+ EngineVersion: strptr("7.1"),
+ CacheNodeType: strptr("cache.t3.micro"),
+ NumCacheNodes: aws.Int32(2),
+ CacheClusterCreateTime: aws.Time(testTime),
+ ConfigurationEndpoint: &types.Endpoint{
+ Address: strptr("my-cluster.abc123.cfg.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ AtRestEncryptionEnabled: aws.Bool(true),
+ TransitEncryptionEnabled: aws.Bool(true),
+ AuthTokenEnabled: aws.Bool(true),
+ AutoMinorVersionUpgrade: aws.Bool(true),
+ CacheSubnetGroupName: strptr("my-subnet-group"),
+ PreferredAvailabilityZone: strptr("us-east-1a"),
+ SecurityGroups: []types.SecurityGroupMembership{
+ {
+ SecurityGroupId: strptr("sg-12345"),
+ Status: strptr("active"),
+ },
+ },
+ CacheNodes: []types.CacheNode{
+ {
+ CacheNodeId: strptr("0001"),
+ CacheNodeStatus: strptr("available"),
+ CacheNodeCreateTime: aws.Time(testTime),
+ CustomerAvailabilityZone: strptr("us-east-1a"),
+ Endpoint: &types.Endpoint{
+ Address: strptr("my-cluster-001.abc123.0001.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ },
+ {
+ CacheNodeId: strptr("0002"),
+ CacheNodeStatus: strptr("available"),
+ CacheNodeCreateTime: aws.Time(testTime),
+ CustomerAvailabilityZone: strptr("us-east-1b"),
+ Endpoint: &types.Endpoint{
+ Address: strptr("my-cluster-001.abc123.0002.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ },
+ },
+ },
+ tags: []types.Tag{
+ {Key: strptr("Environment"), Value: strptr("production")},
+ {Key: strptr("Application"), Value: strptr("web-app")},
+ },
+ expectedTargetCount: 2,
+ expectedLabels: []model.LabelSet{
+ {
+ "__meta_elasticache_deployment_option": "node",
+ "__meta_elasticache_cache_cluster_arn": "arn:aws:elasticache:us-east-1:123456789012:cluster:my-cluster-001",
+ "__meta_elasticache_cache_cluster_cache_cluster_id": "my-cluster-001",
+ "__meta_elasticache_cache_cluster_cache_cluster_status": "available",
+ "__meta_elasticache_cache_cluster_engine": "redis",
+ "__meta_elasticache_cache_cluster_engine_version": "7.1",
+ "__meta_elasticache_cache_cluster_cache_node_type": "cache.t3.micro",
+ "__meta_elasticache_cache_cluster_num_cache_nodes": "2",
+ "__meta_elasticache_cache_cluster_cache_cluster_create_time": "2024-01-01T00:00:00Z",
+ "__meta_elasticache_cache_cluster_configuration_endpoint_address": "my-cluster.abc123.cfg.use1.cache.amazonaws.com",
+ "__meta_elasticache_cache_cluster_configuration_endpoint_port": "6379",
+ "__meta_elasticache_cache_cluster_at_rest_encryption_enabled": "true",
+ "__meta_elasticache_cache_cluster_transit_encryption_enabled": "true",
+ "__meta_elasticache_cache_cluster_auth_token_enabled": "true",
+ "__meta_elasticache_cache_cluster_auto_minor_version_upgrade": "true",
+ "__meta_elasticache_cache_cluster_cache_subnet_group_name": "my-subnet-group",
+ "__meta_elasticache_cache_cluster_preferred_availability_zone": "us-east-1a",
+ "__meta_elasticache_cache_cluster_security_group_membership_id_0": "sg-12345",
+ "__meta_elasticache_cache_cluster_security_group_membership_status_0": "active",
+ "__meta_elasticache_cache_cluster_tag_Environment": "production",
+ "__meta_elasticache_cache_cluster_tag_Application": "web-app",
+ "__meta_elasticache_cache_cluster_node_id": "0001",
+ "__meta_elasticache_cache_cluster_node_status": "available",
+ "__meta_elasticache_cache_cluster_node_create_time": "2024-01-01T00:00:00Z",
+ "__meta_elasticache_cache_cluster_node_availability_zone": "us-east-1a",
+ "__meta_elasticache_cache_cluster_node_endpoint_address": "my-cluster-001.abc123.0001.use1.cache.amazonaws.com",
+ "__meta_elasticache_cache_cluster_node_endpoint_port": "6379",
+ "__address__": "my-cluster-001.abc123.0001.use1.cache.amazonaws.com:6379",
+ },
+ {
+ "__meta_elasticache_deployment_option": "node",
+ "__meta_elasticache_cache_cluster_arn": "arn:aws:elasticache:us-east-1:123456789012:cluster:my-cluster-001",
+ "__meta_elasticache_cache_cluster_cache_cluster_id": "my-cluster-001",
+ "__meta_elasticache_cache_cluster_cache_cluster_status": "available",
+ "__meta_elasticache_cache_cluster_engine": "redis",
+ "__meta_elasticache_cache_cluster_engine_version": "7.1",
+ "__meta_elasticache_cache_cluster_cache_node_type": "cache.t3.micro",
+ "__meta_elasticache_cache_cluster_num_cache_nodes": "2",
+ "__meta_elasticache_cache_cluster_cache_cluster_create_time": "2024-01-01T00:00:00Z",
+ "__meta_elasticache_cache_cluster_configuration_endpoint_address": "my-cluster.abc123.cfg.use1.cache.amazonaws.com",
+ "__meta_elasticache_cache_cluster_configuration_endpoint_port": "6379",
+ "__meta_elasticache_cache_cluster_at_rest_encryption_enabled": "true",
+ "__meta_elasticache_cache_cluster_transit_encryption_enabled": "true",
+ "__meta_elasticache_cache_cluster_auth_token_enabled": "true",
+ "__meta_elasticache_cache_cluster_auto_minor_version_upgrade": "true",
+ "__meta_elasticache_cache_cluster_cache_subnet_group_name": "my-subnet-group",
+ "__meta_elasticache_cache_cluster_preferred_availability_zone": "us-east-1a",
+ "__meta_elasticache_cache_cluster_security_group_membership_id_0": "sg-12345",
+ "__meta_elasticache_cache_cluster_security_group_membership_status_0": "active",
+ "__meta_elasticache_cache_cluster_tag_Environment": "production",
+ "__meta_elasticache_cache_cluster_tag_Application": "web-app",
+ "__meta_elasticache_cache_cluster_node_id": "0002",
+ "__meta_elasticache_cache_cluster_node_status": "available",
+ "__meta_elasticache_cache_cluster_node_create_time": "2024-01-01T00:00:00Z",
+ "__meta_elasticache_cache_cluster_node_availability_zone": "us-east-1b",
+ "__meta_elasticache_cache_cluster_node_endpoint_address": "my-cluster-001.abc123.0002.use1.cache.amazonaws.com",
+ "__meta_elasticache_cache_cluster_node_endpoint_port": "6379",
+ "__address__": "my-cluster-001.abc123.0002.use1.cache.amazonaws.com:6379",
+ },
+ },
+ },
+ {
+ name: "CacheClusterWithSingleNode",
+ cluster: &types.CacheCluster{
+ CacheClusterId: strptr("node-cluster-001"),
+ ARN: strptr("arn:aws:elasticache:us-east-1:123456789012:cluster:node-cluster-001"),
+ CacheClusterStatus: strptr("available"),
+ Engine: strptr("redis"),
+ EngineVersion: strptr("6.2"),
+ CacheNodeType: strptr("cache.r6g.large"),
+ NumCacheNodes: aws.Int32(1),
+ CacheNodes: []types.CacheNode{
+ {
+ CacheNodeId: strptr("0001"),
+ CacheNodeStatus: strptr("available"),
+ CacheNodeCreateTime: aws.Time(testTime),
+ CustomerAvailabilityZone: strptr("us-east-1a"),
+ Endpoint: &types.Endpoint{
+ Address: strptr("node-cluster-001.abc123.0001.use1.cache.amazonaws.com"),
+ Port: aws.Int32(6379),
+ },
+ },
+ },
+ },
+ tags: []types.Tag{},
+ expectedTargetCount: 1,
+ expectedLabels: []model.LabelSet{
+ {
+ "__meta_elasticache_deployment_option": "node",
+ "__meta_elasticache_cache_cluster_arn": "arn:aws:elasticache:us-east-1:123456789012:cluster:node-cluster-001",
+ "__meta_elasticache_cache_cluster_cache_cluster_id": "node-cluster-001",
+ "__meta_elasticache_cache_cluster_cache_cluster_status": "available",
+ "__meta_elasticache_cache_cluster_engine": "redis",
+ "__meta_elasticache_cache_cluster_engine_version": "6.2",
+ "__meta_elasticache_cache_cluster_cache_node_type": "cache.r6g.large",
+ "__meta_elasticache_cache_cluster_num_cache_nodes": "1",
+ "__meta_elasticache_cache_cluster_node_id": "0001",
+ "__meta_elasticache_cache_cluster_node_status": "available",
+ "__meta_elasticache_cache_cluster_node_create_time": "2024-01-01T00:00:00Z",
+ "__meta_elasticache_cache_cluster_node_availability_zone": "us-east-1a",
+ "__meta_elasticache_cache_cluster_node_endpoint_address": "node-cluster-001.abc123.0001.use1.cache.amazonaws.com",
+ "__meta_elasticache_cache_cluster_node_endpoint_port": "6379",
+ "__address__": "node-cluster-001.abc123.0001.use1.cache.amazonaws.com:6379",
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tg := &targetgroup.Group{
+ Source: "test",
+ }
+
+ addCacheClusterTargets(tg, tt.cluster, tt.tags)
+
+ require.Len(t, tg.Targets, tt.expectedTargetCount)
+
+ // Check each target
+ for i, expectedLabels := range tt.expectedLabels {
+ labels := tg.Targets[i]
+
+ // Check that all expected labels are present with correct values
+ for k, v := range expectedLabels {
+ actualValue, exists := labels[k]
+ require.True(t, exists, "label %s should exist in target %d", k, i)
+ require.Equal(t, v, actualValue, "label %s mismatch in target %d", k, i)
+ }
+ }
+ })
+ }
+}
+
+// mockElasticacheClient implements the subset of the ElastiCache API used by discovery, backed by an elasticacheDataStore.
+type mockElasticacheClient struct {
+ data *elasticacheDataStore
+}
+
+func newMockElasticacheClient(data *elasticacheDataStore) *mockElasticacheClient {
+ return &mockElasticacheClient{data: data}
+}
+
+func (m *mockElasticacheClient) DescribeServerlessCaches(_ context.Context, input *elasticache.DescribeServerlessCachesInput, _ ...func(*elasticache.Options)) (*elasticache.DescribeServerlessCachesOutput, error) {
+ if input.ServerlessCacheName != nil {
+ // Filter by name
+ for _, cache := range m.data.serverlessCaches {
+ if cache.ServerlessCacheName != nil && *cache.ServerlessCacheName == *input.ServerlessCacheName {
+ return &elasticache.DescribeServerlessCachesOutput{
+ ServerlessCaches: []types.ServerlessCache{cache},
+ }, nil
+ }
+ }
+ return &elasticache.DescribeServerlessCachesOutput{
+ ServerlessCaches: []types.ServerlessCache{},
+ }, nil
+ }
+
+ return &elasticache.DescribeServerlessCachesOutput{
+ ServerlessCaches: m.data.serverlessCaches,
+ }, nil
+}
+
+func (m *mockElasticacheClient) DescribeCacheClusters(_ context.Context, input *elasticache.DescribeCacheClustersInput, _ ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) {
+ if input.CacheClusterId != nil {
+ // Single cluster lookup
+ for _, cluster := range m.data.cacheClusters {
+ if cluster.CacheClusterId != nil && *cluster.CacheClusterId == *input.CacheClusterId {
+ return &elasticache.DescribeCacheClustersOutput{
+ CacheClusters: []types.CacheCluster{cluster},
+ }, nil
+ }
+ }
+ return &elasticache.DescribeCacheClustersOutput{
+ CacheClusters: []types.CacheCluster{},
+ }, nil
+ }
+
+ return &elasticache.DescribeCacheClustersOutput{
+ CacheClusters: m.data.cacheClusters,
+ }, nil
+}
+
+func (m *mockElasticacheClient) ListTagsForResource(_ context.Context, input *elasticache.ListTagsForResourceInput, _ ...func(*elasticache.Options)) (*elasticache.ListTagsForResourceOutput, error) {
+ if input.ResourceName != nil {
+ if tags, ok := m.data.tags[*input.ResourceName]; ok {
+ return &elasticache.ListTagsForResourceOutput{
+ TagList: tags,
+ }, nil
+ }
+ }
+
+ return &elasticache.ListTagsForResourceOutput{
+ TagList: []types.Tag{},
+ }, nil
+}
+
+func TestSplitCacheDeploymentOptions(t *testing.T) {
+ tests := []struct {
+ name string
+ caches []string
+ expectedServerlessCacheIDs []string
+ expectedCacheClusterIDs []string
+ }{
+ {
+ name: "MixedARNs",
+ caches: []string{
+ "arn:aws:elasticache:us-east-1:123456789012:serverlesscache:my-serverless-cache",
+ "arn:aws:elasticache:us-east-1:123456789012:replicationgroup:my-replication-group",
+ "arn:aws:elasticache:us-west-2:123456789012:serverlesscache:prod-cache",
+ },
+ expectedServerlessCacheIDs: []string{"my-serverless-cache", "prod-cache"},
+ expectedCacheClusterIDs: []string{"my-replication-group"},
+ },
+ {
+ name: "OnlyServerlessCaches",
+ caches: []string{
+ "arn:aws:elasticache:us-east-1:123456789012:serverlesscache:cache-1",
+ "arn:aws:elasticache:us-east-1:123456789012:serverlesscache:cache-2",
+ },
+ expectedServerlessCacheIDs: []string{"cache-1", "cache-2"},
+ expectedCacheClusterIDs: nil,
+ },
+ {
+ name: "OnlyReplicationGroups",
+ caches: []string{
+ "arn:aws:elasticache:us-east-1:123456789012:replicationgroup:cluster-1",
+ "arn:aws:elasticache:us-east-1:123456789012:replicationgroup:cluster-2",
+ },
+ expectedServerlessCacheIDs: nil,
+ expectedCacheClusterIDs: []string{"cluster-1", "cluster-2"},
+ },
+ {
+ name: "EmptyInput",
+ caches: []string{},
+ expectedServerlessCacheIDs: nil,
+ expectedCacheClusterIDs: nil,
+ },
+ {
+ name: "InvalidARNs",
+ caches: []string{
+ "not-an-arn",
+ "arn:aws:elasticache:us-east-1",
+ "",
+ },
+ expectedServerlessCacheIDs: nil,
+ expectedCacheClusterIDs: nil,
+ },
+ {
+ name: "UnknownResourceType",
+ caches: []string{
+ "arn:aws:elasticache:us-east-1:123456789012:unknown:resource-id",
+ },
+ expectedServerlessCacheIDs: nil,
+ expectedCacheClusterIDs: nil,
+ },
+ {
+ name: "MixedWithInvalidARNs",
+ caches: []string{
+ "arn:aws:elasticache:us-east-1:123456789012:serverlesscache:valid-cache",
+ "invalid-arn",
+ "arn:aws:elasticache:us-east-1:123456789012:replicationgroup:valid-cluster",
+ "",
+ "arn:aws:elasticache:us-east-1:123456789012:unknown:ignored",
+ },
+ expectedServerlessCacheIDs: []string{"valid-cache"},
+ expectedCacheClusterIDs: []string{"valid-cluster"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ serverlessCacheIDs, cacheClusterIDs := splitCacheDeploymentOptions(tt.caches)
+
+ require.Equal(t, tt.expectedServerlessCacheIDs, serverlessCacheIDs, "serverless cache IDs mismatch")
+ require.Equal(t, tt.expectedCacheClusterIDs, cacheClusterIDs, "cache cluster IDs mismatch")
+ })
+ }
+}
diff --git a/discovery/aws/metrics_elasticache.go b/discovery/aws/metrics_elasticache.go
new file mode 100644
index 0000000000..7ecfcb4b72
--- /dev/null
+++ b/discovery/aws/metrics_elasticache.go
@@ -0,0 +1,32 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+ "github.com/prometheus/prometheus/discovery"
+)
+
+type elasticacheMetrics struct {
+ refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+var _ discovery.DiscovererMetrics = (*elasticacheMetrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (*elasticacheMetrics) Register() error {
+ return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (*elasticacheMetrics) Unregister() {}
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index 162730d9aa..8a49005100 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -1562,11 +1562,9 @@ func TestConfigReloadAndShutdownRace(t *testing.T) {
discoveryManager.updatert = 100 * time.Millisecond
var wgDiscovery sync.WaitGroup
- wgDiscovery.Add(1)
- go func() {
+ wgDiscovery.Go(func() {
discoveryManager.Run()
- wgDiscovery.Done()
- }()
+ })
time.Sleep(time.Millisecond * 200)
var wgBg sync.WaitGroup
@@ -1588,11 +1586,9 @@ func TestConfigReloadAndShutdownRace(t *testing.T) {
discoveryManager.ApplyConfig(c)
delete(c, "prometheus")
- wgBg.Add(1)
- go func() {
+ wgBg.Go(func() {
discoveryManager.ApplyConfig(c)
- wgBg.Done()
- }()
+ })
mgrCancel()
wgDiscovery.Wait()
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index a3193c74c7..53fd6aaae5 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -1029,11 +1029,117 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_msk_broker_node_exporter_enabled`: whether node exporter is enabled on brokers (broker nodes only)
* `__meta_msk_controller_endpoint_index`: the index of the controller endpoint (controller nodes only)
+#### `elasticache`
+
+The `elasticache` role discovers targets from AWS ElastiCache for both serverless caches and cache clusters.
+
+**Important**: For cache clusters, one target is created per cache node. Each target includes the cluster-level labels (ARN, status, tags, etc.) and node-specific labels (node ID, endpoint, availability zone, etc.). The `__address__` label is set to the individual node's endpoint address and port.
+
+For serverless caches, one target is created per serverless cache, with the `__address__` label set to the serverless cache endpoint.
+
+The IAM credentials used must have the following permissions to discover scrape targets:
+
+- `elasticache:DescribeServerlessCaches`
+- `elasticache:DescribeCacheClusters`
+- `elasticache:ListTagsForResource`
+
+The following meta labels are available on targets during [relabeling](#relabel_config):
+
+**Common labels (available on all targets):**
+
+* `__meta_elasticache_deployment_option`: the deployment option - either `serverless` for serverless caches or `node` for cache cluster nodes
+
+**Serverless Cache labels:**
+
+* `__meta_elasticache_serverless_cache_arn`: the ARN of the serverless cache
+* `__meta_elasticache_serverless_cache_name`: the name of the serverless cache
+* `__meta_elasticache_serverless_cache_status`: the status of the serverless cache
+* `__meta_elasticache_serverless_cache_engine`: the cache engine (redis or valkey)
+* `__meta_elasticache_serverless_cache_full_engine_version`: the full engine version
+* `__meta_elasticache_serverless_cache_major_engine_version`: the major engine version
+* `__meta_elasticache_serverless_cache_description`: the description of the serverless cache
+* `__meta_elasticache_serverless_cache_create_time`: the creation time in RFC3339 format
+* `__meta_elasticache_serverless_cache_snapshot_retention_limit`: the snapshot retention limit in days
+* `__meta_elasticache_serverless_cache_daily_snapshot_time`: the daily snapshot time
+* `__meta_elasticache_serverless_cache_user_group_id`: the user group ID
+* `__meta_elasticache_serverless_cache_kms_key_id`: the KMS key ID for encryption at rest
+* `__meta_elasticache_serverless_cache_endpoint_address`: the endpoint address
+* `__meta_elasticache_serverless_cache_endpoint_port`: the endpoint port
+* `__meta_elasticache_serverless_cache_reader_endpoint_address`: the reader endpoint address
+* `__meta_elasticache_serverless_cache_reader_endpoint_port`: the reader endpoint port
+* `__meta_elasticache_serverless_cache_security_group_id_`: security group IDs (indexed)
+* `__meta_elasticache_serverless_cache_subnet_id_`: subnet IDs (indexed)
+* `__meta_elasticache_serverless_cache_cache_usage_limit_data_storage_maximum`: maximum data storage in the specified unit
+* `__meta_elasticache_serverless_cache_cache_usage_limit_data_storage_minimum`: minimum data storage in the specified unit
+* `__meta_elasticache_serverless_cache_cache_usage_limit_data_storage_unit`: unit for data storage (e.g., GB)
+* `__meta_elasticache_serverless_cache_cache_usage_limit_ecpu_per_second_maximum`: maximum ECPU per second
+* `__meta_elasticache_serverless_cache_cache_usage_limit_ecpu_per_second_minimum`: minimum ECPU per second
+* `__meta_elasticache_serverless_cache_tag_`: each serverless cache tag value, keyed by tag name
+
+**Cache Cluster labels:**
+
+* `__meta_elasticache_cache_cluster_arn`: the ARN of the cache cluster
+* `__meta_elasticache_cache_cluster_cache_cluster_id`: the cache cluster ID
+* `__meta_elasticache_cache_cluster_cache_cluster_status`: the status of the cache cluster
+* `__meta_elasticache_cache_cluster_engine`: the cache engine (redis or memcached)
+* `__meta_elasticache_cache_cluster_engine_version`: the engine version
+* `__meta_elasticache_cache_cluster_cache_node_type`: the cache node type (e.g., cache.t3.micro)
+* `__meta_elasticache_cache_cluster_num_cache_nodes`: the number of cache nodes
+* `__meta_elasticache_cache_cluster_cache_cluster_create_time`: the creation time in RFC3339 format
+* `__meta_elasticache_cache_cluster_at_rest_encryption_enabled`: whether encryption at rest is enabled
+* `__meta_elasticache_cache_cluster_transit_encryption_enabled`: whether encryption in transit is enabled
+* `__meta_elasticache_cache_cluster_transit_encryption_mode`: the transit encryption mode
+* `__meta_elasticache_cache_cluster_auth_token_enabled`: whether auth token is enabled
+* `__meta_elasticache_cache_cluster_auth_token_last_modified`: the last modification time of the auth token
+* `__meta_elasticache_cache_cluster_auto_minor_version_upgrade`: whether auto minor version upgrade is enabled
+* `__meta_elasticache_cache_cluster_cache_parameter_group`: the cache parameter group name
+* `__meta_elasticache_cache_cluster_cache_subnet_group_name`: the cache subnet group name
+* `__meta_elasticache_cache_cluster_client_download_landing_page`: the client download landing page URL
+* `__meta_elasticache_cache_cluster_ip_discovery`: the IP discovery mode (ipv4 or ipv6)
+* `__meta_elasticache_cache_cluster_network_type`: the network type (ipv4, ipv6, or dual_stack)
+* `__meta_elasticache_cache_cluster_preferred_availability_zone`: the preferred availability zone
+* `__meta_elasticache_cache_cluster_preferred_maintenance_window`: the preferred maintenance window
+* `__meta_elasticache_cache_cluster_preferred_outpost_arn`: the preferred outpost ARN
+* `__meta_elasticache_cache_cluster_replication_group_id`: the replication group ID (for Redis clusters that are part of a replication group)
+* `__meta_elasticache_cache_cluster_replication_group_log_delivery_enabled`: whether log delivery is enabled for the replication group
+* `__meta_elasticache_cache_cluster_snapshot_retention_limit`: the snapshot retention limit in days
+* `__meta_elasticache_cache_cluster_snapshot_window`: the daily snapshot window
+* `__meta_elasticache_cache_cluster_configuration_endpoint_address`: the configuration endpoint address (cluster mode enabled only)
+* `__meta_elasticache_cache_cluster_configuration_endpoint_port`: the configuration endpoint port (cluster mode enabled only)
+* `__meta_elasticache_cache_cluster_notification_topic_arn`: the SNS topic ARN for notifications
+* `__meta_elasticache_cache_cluster_notification_topic_status`: the SNS topic status
+* `__meta_elasticache_cache_cluster_log_delivery_configuration_destination_type_`: log delivery destination type (cloudwatch-logs or kinesis-firehose)
+* `__meta_elasticache_cache_cluster_log_delivery_configuration_log_format_`: log format (text or json)
+* `__meta_elasticache_cache_cluster_log_delivery_configuration_log_type_`: log type (slow-log or engine-log)
+* `__meta_elasticache_cache_cluster_log_delivery_configuration_status_`: log delivery status
+* `__meta_elasticache_cache_cluster_log_delivery_configuration_message_`: log delivery message
+* `__meta_elasticache_cache_cluster_log_delivery_configuration_log_group_`: CloudWatch log group name (cloudwatch-logs destination only)
+* `__meta_elasticache_cache_cluster_log_delivery_configuration_delivery_stream_`: Kinesis Firehose delivery stream name (kinesis-firehose destination only)
+* `__meta_elasticache_cache_cluster_pending_modified_values_auth_token_status`: pending auth token status
+* `__meta_elasticache_cache_cluster_pending_modified_values_cache_node_type`: pending cache node type change
+* `__meta_elasticache_cache_cluster_pending_modified_values_engine_version`: pending engine version upgrade
+* `__meta_elasticache_cache_cluster_pending_modified_values_num_cache_nodes`: pending number of cache nodes
+* `__meta_elasticache_cache_cluster_pending_modified_values_transit_encryption_enabled`: pending transit encryption status
+* `__meta_elasticache_cache_cluster_pending_modified_values_transit_encryption_mode`: pending transit encryption mode
+* `__meta_elasticache_cache_cluster_pending_modified_values_cache_node_ids_to_remove`: comma-separated list of cache node IDs to be removed
+* `__meta_elasticache_cache_cluster_security_group_membership_id_`: security group ID (indexed)
+* `__meta_elasticache_cache_cluster_security_group_membership_status_`: security group status (indexed)
+* `__meta_elasticache_cache_cluster_node_id`: cache node ID
+* `__meta_elasticache_cache_cluster_node_status`: cache node status
+* `__meta_elasticache_cache_cluster_node_create_time`: cache node creation time in RFC3339 format
+* `__meta_elasticache_cache_cluster_node_availability_zone`: cache node availability zone
+* `__meta_elasticache_cache_cluster_node_customer_outpost_arn`: cache node outpost ARN
+* `__meta_elasticache_cache_cluster_node_source_cache_node_id`: source cache node ID for replication
+* `__meta_elasticache_cache_cluster_node_parameter_group_status`: parameter group status
+* `__meta_elasticache_cache_cluster_node_endpoint_address`: cache node endpoint address
+* `__meta_elasticache_cache_cluster_node_endpoint_port`: cache node endpoint port
+* `__meta_elasticache_cache_cluster_tag_`: each cache cluster tag value, keyed by tag name
+
See below for the configuration options for AWS discovery:
```yaml
# The AWS role to use for service discovery.
-# Must be one of: ec2, lightsail, ecs, or msk.
+# Must be one of: ec2, lightsail, ecs, msk, or elasticache.
role:
# The AWS region. If blank, the region from the instance metadata is used.
@@ -1069,8 +1175,9 @@ filters:
[ - name:
values: , [...] ]
-# List of ECS or MSK cluster ARNs (ecs and msk roles only) to discover. If empty, all clusters in the region are discovered.
-# This can significantly improve performance when you only need to monitor specific clusters.
+# List of ECS, ElastiCache, or MSK cluster ARNs (ecs, elasticache, and msk roles only) to discover.
+# If empty, all clusters in the region are discovered.
+# This can significantly improve performance when you only need to monitor specific clusters/caches.
[ clusters: [, ...] ]
# HTTP client settings, including authentication methods (such as basic auth and
@@ -2528,8 +2635,7 @@ in the configuration file), which can also be changed using relabeling.
### ``
-Nerve SD configurations allow retrieving scrape targets from [AirBnB's Nerve]
-(https://github.com/airbnb/nerve) which are stored in
+Nerve SD configurations allow retrieving scrape targets from [AirBnB's Nerve](https://github.com/airbnb/nerve) which are stored in
[Zookeeper](https://zookeeper.apache.org/).
The following meta labels are available on targets during [relabeling](#relabel_config):
@@ -2583,8 +2689,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
### ``
-Serverset SD configurations allow retrieving scrape targets from [Serversets]
-(https://github.com/twitter/finagle/tree/develop/finagle-serversets) which are
+Serverset SD configurations allow retrieving scrape targets from [Serversets](https://github.com/twitter/finagle/tree/develop/finagle-serversets) which are
stored in [Zookeeper](https://zookeeper.apache.org/). Serversets are commonly
used by [Finagle](https://twitter.github.io/finagle/) and
[Aurora](https://aurora.apache.org/).
diff --git a/docs/querying/functions.md b/docs/querying/functions.md
index 3a9b7025f8..68a003359d 100644
--- a/docs/querying/functions.md
+++ b/docs/querying/functions.md
@@ -433,6 +433,23 @@ and is therefore flagged by an info-level annotation reading `input to
histogram_quantile needed to be fixed for monotonicity`. If you encounter this
annotation, you should find and remove the source of the invalid data.
+## `histogram_quantiles()`
+
+**This function has to be enabled via the [feature
+flag](../feature_flags.md#experimental-promql-functions)
+`--enable-feature=promql-experimental-functions`.**
+
+`histogram_quantiles(v instant-vector, quantile_label string, φ_1 scalar, φ_2 scalar, ...)` calculates multiple (between 1 and 10) φ-quantiles (0 ≤
+φ ≤ 1) from a [classic
+histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) or from
+a native histogram. Quantile calculation works the same way as in `histogram_quantile()`.
+The second argument (a string) specifies the label name that is used to identify different quantiles in the query result.
+```
+histogram_quantiles(sum(rate(foo[1m])), "quantile", 0.9, 0.99)
+# => {quantile="0.9"} 123
+ {quantile="0.99"} 128
+```
+
## `histogram_stddev()` and `histogram_stdvar()`
`histogram_stddev(v instant-vector)` returns the estimated standard deviation
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
index 2e78354bd2..61488127f6 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
@@ -73,8 +73,7 @@ func pathFromMetric(m model.Metric, prefix string) string {
// Since we use '.' instead of '=' to separate label and values
// it means that we can't have an '.' in the metric name. Fortunately
// this is prohibited in prometheus metrics.
- buffer.WriteString(fmt.Sprintf(
- ".%s.%s", string(l), escape(v)))
+ fmt.Fprintf(&buffer, ".%s.%s", string(l), escape(v))
}
return buffer.String()
}
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
index ddf78283e7..9ef5b03e72 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
@@ -166,9 +166,9 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) {
if m.Name == model.MetricNameLabel {
switch m.Type {
case prompb.LabelMatcher_EQ:
- measurement.WriteString(fmt.Sprintf(" == \"%s\"", m.Value))
+ fmt.Fprintf(&measurement, " == \"%s\"", m.Value)
case prompb.LabelMatcher_RE:
- measurement.WriteString(fmt.Sprintf(" =~ /%s/", escapeSlashes(m.Value)))
+ fmt.Fprintf(&measurement, " =~ /%s/", escapeSlashes(m.Value))
default:
// TODO: Figure out how to support these efficiently.
return "", errors.New("non-equal or regex-non-equal matchers are not supported on the metric name yet")
diff --git a/go.mod b/go.mod
index 89d468e874..21e477e952 100644
--- a/go.mod
+++ b/go.mod
@@ -12,22 +12,23 @@ require (
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
github.com/aws/aws-sdk-go-v2 v1.41.1
- github.com/aws/aws-sdk-go-v2/config v1.32.7
- github.com/aws/aws-sdk-go-v2/credentials v1.19.7
- github.com/aws/aws-sdk-go-v2/service/ec2 v1.285.0
- github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0
- github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7
+ github.com/aws/aws-sdk-go-v2/config v1.32.9
+ github.com/aws/aws-sdk-go-v2/credentials v1.19.9
+ github.com/aws/aws-sdk-go-v2/service/ec2 v1.290.0
+ github.com/aws/aws-sdk-go-v2/service/ecs v1.72.0
+ github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9
+ github.com/aws/aws-sdk-go-v2/service/kafka v1.48.0
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6
- github.com/aws/smithy-go v1.24.0
+ github.com/aws/smithy-go v1.24.1
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
- github.com/digitalocean/godo v1.173.0
+ github.com/digitalocean/godo v1.175.0
github.com/docker/docker v28.5.2+incompatible
github.com/edsrzf/mmap-go v1.2.0
- github.com/envoyproxy/go-control-plane/envoy v1.36.0
- github.com/envoyproxy/protoc-gen-validate v1.3.0
+ github.com/envoyproxy/go-control-plane/envoy v1.37.0
+ github.com/envoyproxy/protoc-gen-validate v1.3.3
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/felixge/fgprof v0.9.5
github.com/fsnotify/fsnotify v1.9.0
@@ -41,24 +42,24 @@ require (
github.com/gophercloud/gophercloud/v2 v2.10.0
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
github.com/hashicorp/consul/api v1.33.2
- github.com/hashicorp/nomad/api v0.0.0-20260205205048-8315996478d1
+ github.com/hashicorp/nomad/api v0.0.0-20260220212019-daca79db0bd6
github.com/hetznercloud/hcloud-go/v2 v2.36.0
github.com/ionos-cloud/sdk-go/v6 v6.3.6
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.18.3
+ github.com/klauspost/compress v1.18.4
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.65.0
github.com/miekg/dns v1.1.72
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
- github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
+ github.com/nsf/jsondiff v0.0.0-20260207060731-8e8d90c4c0ac
github.com/oklog/run v1.2.0
github.com/oklog/ulid/v2 v2.1.1
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0
github.com/ovh/go-ovh v1.9.0
github.com/pb33f/libopenapi v0.33.4
github.com/pb33f/libopenapi-validator v0.11.1
- github.com/prometheus/alertmanager v0.31.0
+ github.com/prometheus/alertmanager v0.31.1
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562
github.com/prometheus/client_model v0.6.2
@@ -90,17 +91,17 @@ require (
go.yaml.in/yaml/v2 v2.4.3
go.yaml.in/yaml/v3 v3.0.4
go.yaml.in/yaml/v4 v4.0.0-rc.4
- golang.org/x/oauth2 v0.34.0
+ golang.org/x/oauth2 v0.35.0
golang.org/x/sync v0.19.0
- golang.org/x/sys v0.40.0
- golang.org/x/text v0.33.0
- google.golang.org/api v0.265.0
- google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20
+ golang.org/x/sys v0.41.0
+ golang.org/x/text v0.34.0
+ google.golang.org/api v0.267.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.11
- k8s.io/api v0.35.0
- k8s.io/apimachinery v0.35.0
- k8s.io/client-go v0.35.0
+ k8s.io/api v0.35.1
+ k8s.io/apimachinery v0.35.1
+ k8s.io/client-go v0.35.1
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.130.1
)
@@ -146,11 +147,11 @@ require (
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.10 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
- github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
+ github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -181,7 +182,7 @@ require (
github.com/google/go-querystring v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
- github.com/googleapis/gax-go/v2 v2.16.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.17.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/hashicorp/cronexpr v1.1.3 // indirect
@@ -231,7 +232,7 @@ require (
github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
- go.mongodb.org/mongo-driver v1.17.6 // indirect
+ go.mongodb.org/mongo-driver v1.17.9 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/collector/confmap v1.51.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.145.0 // indirect
@@ -246,7 +247,7 @@ require (
golang.org/x/term v0.39.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.41.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.1 // indirect
diff --git a/go.sum b/go.sum
index bcb7b8fcc1..8683d3efe3 100644
--- a/go.sum
+++ b/go.sum
@@ -51,10 +51,10 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
-github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
-github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
+github.com/aws/aws-sdk-go-v2/config v1.32.9 h1:ktda/mtAydeObvJXlHzyGpK1xcsLaP16zfUPDGoW90A=
+github.com/aws/aws-sdk-go-v2/config v1.32.9/go.mod h1:U+fCQ+9QKsLW786BCfEjYRj34VVTbPdsLP3CHSYXMOI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.9 h1:sWvTKsyrMlJGEuj/WgrwilpoJ6Xa1+KhIpGdzw7mMU8=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.9/go.mod h1:+J44MBhmfVY/lETFiKI+klz0Vym2aCmIjqgClMmW82w=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
@@ -63,28 +63,30 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.285.0 h1:cRZQsqCy59DSJmvmUYzi9K+dutysXzfx6F+fkcIHtOk=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.285.0/go.mod h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 h1:MzP/ElwTpINq+hS80ZQz4epKVnUTlz8Sz+P/AFORCKM=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0/go.mod h1:pMlGFDpHoLTJOIZHGdJOAWmi+xeIlQXuFTuQxs1epYE=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.290.0 h1:Ub4CvLWf8wEQ7/pEiqXM9tTsHXf2BokPLwbqEvrmAq0=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.290.0/go.mod h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.72.0 h1:hggRKpv26DpYMOik3wWo1Ty5MkANoXhNobjfWpC3G4M=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.72.0/go.mod h1:pMlGFDpHoLTJOIZHGdJOAWmi+xeIlQXuFTuQxs1epYE=
+github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9 h1:hTgZLyNoDWphZUtTtcvQh0LP6TZO0mtdSfZK/GObDLk=
+github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9/go.mod h1:91RkIYy9ubykxB50XGYDsbljLZnrZ6rp/Urt4rZrbwQ=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
-github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7 h1:0jDb9b505gbCmtjH1RT7kx8hDbVDzOhnTeZm7dzskpQ=
-github.com/aws/aws-sdk-go-v2/service/kafka v1.46.7/go.mod h1:tWnHS64fg5ydLHivFlCAtEh/1iMNzr56QsH3F+UTwD4=
+github.com/aws/aws-sdk-go-v2/service/kafka v1.48.0 h1:CKRWqysU9INeoi0nTI9gDzDAJk+GatzFduVYxT/wkrw=
+github.com/aws/aws-sdk-go-v2/service/kafka v1.48.0/go.mod h1:tWnHS64fg5ydLHivFlCAtEh/1iMNzr56QsH3F+UTwD4=
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11 h1:VM5e5M39zRSs+aT0O9SoxHjUXqXxhbw3Yi0FdMQWPIc=
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11/go.mod h1:0jvzYPIQGCpnY/dmdaotTk2JH4QuBlnW0oeyrcGLWJ4=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.10 h1:+VTRawC4iVY58pS/lzpo0lnoa/SYNGF4/B/3/U5ro8Y=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.10/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 h1:0jbJeuEHlwKJ9PfXtpSFc4MF+WIWORdhN1n30ITZGFM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
-github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
-github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/aws/smithy-go v1.24.1 h1:VbyeNfmYkWoxMVpGUAbQumkODcYmfMRfZ8yQiH30SK0=
+github.com/aws/smithy-go v1.24.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad h1:3swAvbzgfaI6nKuDDU7BiKfZRdF+h2ZwKgMHd8Ha4t8=
@@ -113,8 +115,8 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
-github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
+github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e h1:gt7U1Igw0xbJdyaCM5H2CnlAlPSkzrhsebQB6WQWjLA=
+github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -129,8 +131,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.173.0 h1:tgzevGhlz9VFjk2y3NmeItUT4vIVVCRFETlG/1GlEQI=
-github.com/digitalocean/godo v1.173.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
+github.com/digitalocean/godo v1.175.0 h1:tpfwJFkBzpePxvvFazOn69TXctdxuFlOs7DMVXsI7oU=
+github.com/digitalocean/godo v1.175.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
@@ -145,10 +147,10 @@ github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=
-github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
-github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=
-github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
+github.com/envoyproxy/go-control-plane/envoy v1.37.0 h1:u3riX6BoYRfF4Dr7dwSOroNfdSbEPe9Yyl09/B6wBrQ=
+github.com/envoyproxy/go-control-plane/envoy v1.37.0/go.mod h1:DReE9MMrmecPy+YvQOAOHNYMALuowAnbjjEMkkWOi6A=
+github.com/envoyproxy/protoc-gen-validate v1.3.3 h1:MVQghNeW+LZcmXe7SY1V36Z+WFMDjpqGAGacLe2T0ds=
+github.com/envoyproxy/protoc-gen-validate v1.3.3/go.mod h1:TsndJ/ngyIdQRhMcVVGDDHINPLWB7C82oDArY51KfB0=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -268,8 +270,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao=
github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8=
-github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y=
-github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14=
+github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc=
+github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY=
github.com/gophercloud/gophercloud/v2 v2.10.0 h1:NRadC0aHNvy4iMoFXj5AFiPmut/Sj3hAPAo9B59VMGc=
github.com/gophercloud/gophercloud/v2 v2.10.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
@@ -329,8 +331,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/memberlist v0.5.4 h1:40YY+3qq2tAUhZIMEK8kqusKZBBjdwJ3NUjvYkcxh74=
github.com/hashicorp/memberlist v0.5.4/go.mod h1:OgN6xiIo6RlHUWk+ALjP9e32xWCoQrsOCmHrWCm2MWA=
-github.com/hashicorp/nomad/api v0.0.0-20260205205048-8315996478d1 h1:2T7Ay5FMAnZUBxSbrkjufY5YKiLPWij0dDPnbM/KYak=
-github.com/hashicorp/nomad/api v0.0.0-20260205205048-8315996478d1/go.mod h1:JAmS1nGJ1KcTM+MHAkgyrL0GDbsnKiJsp75KyqO2wWc=
+github.com/hashicorp/nomad/api v0.0.0-20260220212019-daca79db0bd6 h1:QN/GwpGyiW8RdNcHGMA1xVnM8tJkAGNDR/BZ47XR+OU=
+github.com/hashicorp/nomad/api v0.0.0-20260220212019-daca79db0bd6/go.mod h1:KkLNLU0Nyfh5jWsFoF/PsmMbKpRIAoIV4lmQoJWgKCk=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.36.0 h1:HlLL/aaVXUulqe+rsjoJmrxKhPi1MflL5O9iq5QEtvo=
@@ -354,8 +356,8 @@ github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRt
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
-github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
+github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
@@ -435,8 +437,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM=
-github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
+github.com/nsf/jsondiff v0.0.0-20260207060731-8e8d90c4c0ac h1:4YV96Dzy2csSnhzl14/Qk5YsSrKAQusGsIADDn/4/g8=
+github.com/nsf/jsondiff v0.0.0-20260207060731-8e8d90c4c0ac/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=
github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
@@ -489,8 +491,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/alertmanager v0.31.0 h1:DQW02uIUNNiAa9AD9VA5xaFw5D+xrV+bocJc4gN9bEU=
-github.com/prometheus/alertmanager v0.31.0/go.mod h1:zWPQwhbLt2ybee8rL921UONeQ59Oncash+m/hGP17tU=
+github.com/prometheus/alertmanager v0.31.1 h1:eAmIC42lzbWslHkMt693T36qdxfyZULswiHr681YS3Q=
+github.com/prometheus/alertmanager v0.31.1/go.mod h1:zWPQwhbLt2ybee8rL921UONeQ59Oncash+m/hGP17tU=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
@@ -570,8 +572,8 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
-go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.mongodb.org/mongo-driver v1.17.9 h1:IexDdCuuNJ3BHrELgBlyaH9p60JXAvdzWR128q+U5tU=
+go.mongodb.org/mongo-driver v1.17.9/go.mod h1:LlOhpH5NUEfhxcAwG0UEkMqwYcc4JU18gtCdGudk/tQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/collector/component v1.51.0 h1:btNW76MCRmpsk0ARRT5wspDXF9tvdaLd3uBtYXIiQn0=
@@ -686,8 +688,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
-golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
-golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
+golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -728,8 +730,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
-golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
+golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -745,8 +747,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
-golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
+golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
+golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -766,14 +768,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/api v0.265.0 h1:FZvfUdI8nfmuNrE34aOWFPmLC+qRBEiNm3JdivTvAAU=
-google.golang.org/api v0.265.0/go.mod h1:uAvfEl3SLUj/7n6k+lJutcswVojHPp2Sp08jWCu8hLY=
-google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934=
-google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0=
-google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0=
-google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/api v0.267.0 h1:w+vfWPMPYeRs8qH1aYYsFX68jMls5acWl/jocfLomwE=
+google.golang.org/api v0.267.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0=
+google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM=
+google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM=
+google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d h1:EocjzKLywydp5uZ5tJ79iP6Q0UjDnyiHkGRWxuPBP8s=
+google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:48U2I+QQUYhsFrg2SY6r+nJzeOtjey7j//WBESw+qyQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
@@ -802,12 +804,12 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
-k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
-k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
-k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
-k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
-k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
-k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
+k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
+k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
+k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
+k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
+k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
diff --git a/go.work b/go.work
index 4d53344b16..aea341baab 100644
--- a/go.work
+++ b/go.work
@@ -5,4 +5,5 @@ use (
./documentation/examples/remote_storage
./internal/tools
./web/ui/mantine-ui/src/promql/tools
+ ./compliance
)
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index f3853a86c6..041724c22d 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -6,7 +6,7 @@ require (
github.com/bufbuild/buf v1.65.0
github.com/daixiang0/gci v0.13.7
github.com/gogo/protobuf v1.3.2
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0
)
require (
@@ -108,7 +108,7 @@ require (
golang.org/x/time v0.14.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
- google.golang.org/grpc v1.78.0 // indirect
+ google.golang.org/grpc v1.79.1 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.5.1 // indirect
diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index ab0255fd6e..62602903c7 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -107,8 +107,8 @@ github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4p
github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8 h1:NpbJl/eVbvrGE0MJ6X16X9SAifesl6Fwxg/YmCvubRI=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8/go.mod h1:mi7YA+gCzVem12exXy46ZespvGtX/lZmD/RLnQhVW7U=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -279,8 +279,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
-google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
-google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
+google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/model/labels/regexp.go b/model/labels/regexp.go
index 5f4f753419..f446b5358a 100644
--- a/model/labels/regexp.go
+++ b/model/labels/regexp.go
@@ -67,6 +67,9 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
if err != nil {
return nil, err
}
+
+ parsed = optimizeAlternatingSimpleContains(parsed)
+
m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$")
if err != nil {
return nil, err
@@ -369,6 +372,43 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
return multiMatcher, multiMatcher.setMatches()
}
+// optimizeAlternatingSimpleContains checks to see if a regex is a series of alternations that take the form .*literal.*
+// In these cases, the regex itself can be rewritten as .*(foo|bar).*,
+// which can result in a significant performance improvement at execution.
+func optimizeAlternatingSimpleContains(r *syntax.Regexp) *syntax.Regexp {
+ if r.Op != syntax.OpAlternate {
+ return r
+ }
+ containsLiterals := make([]*syntax.Regexp, 0, len(r.Sub))
+ for _, sub := range r.Sub {
+ // If any subexpression does not take the form .*literal.*, we should not try to optimize this
+ if sub.Op != syntax.OpConcat || len(sub.Sub) != 3 {
+ return r
+ }
+ concatSubs := sub.Sub
+ if !isCaseSensitiveLiteral(concatSubs[1]) || !isMatchAny(concatSubs[0]) || !isMatchAny(concatSubs[2]) {
+ return r
+ }
+ containsLiterals = append(containsLiterals, concatSubs[1])
+ }
+
+ // Only rewrite the regex if there's more than one literal
+ if len(containsLiterals) > 1 {
+ returnRegex := &syntax.Regexp{Op: syntax.OpConcat}
+ prefixAnyMatcher := &syntax.Regexp{Op: syntax.OpStar, Sub: []*syntax.Regexp{{Op: syntax.OpAnyChar}}, Flags: syntax.Perl | syntax.DotNL}
+ suffixAnyMatcher := &syntax.Regexp{Op: syntax.OpStar, Sub: []*syntax.Regexp{{Op: syntax.OpAnyChar}}, Flags: syntax.Perl | syntax.DotNL}
+ alts := &syntax.Regexp{Op: syntax.OpAlternate}
+ alts.Sub = containsLiterals
+ returnRegex.Sub = []*syntax.Regexp{
+ prefixAnyMatcher,
+ alts,
+ suffixAnyMatcher,
+ }
+ return returnRegex
+ }
+ return r
+}
+
// optimizeConcatRegex returns literal prefix/suffix text that can be safely
// checked against the label value before running the regexp matcher.
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) {
diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go
index d4385c7481..be3417a8c0 100644
--- a/model/labels/regexp_test.go
+++ b/model/labels/regexp_test.go
@@ -46,6 +46,13 @@ var (
"foo\n.*",
".*foo.*",
".+foo.+",
+ ".*foo.*|",
+ ".*foo.*|bar.*",
+ "foo.*|.*bar.*",
+ ".*foo.*|.*bar.*",
+ ".*foo.*bar.*|.*hello.*",
+ ".*foo.*|.*bar.*|.*hello.*",
+ ".+.*foo.*|.*bar.*",
"(?s:.*)",
"(?s:.+)",
"(?s:^.*foo$)",
@@ -68,6 +75,8 @@ var (
// values of a label like kubernetes pod will often include the
// deployment name as a prefix.
"jyyfj00j0061|jyyfj00j0062|jyyfj94j0093|jyyfj99j0093|jyyfm01j0021|jyyfm02j0021|jyefj00j0192|jyefj00j0193|jyefj00j0194|jyefj00j0195|jyefj00j0196|jyefj00j0197|jyefj00j0290|jyefj00j0291|jyefj00j0292|jyefj00j0293|jyefj00j0294|jyefj00j0295|jyefj00j0296|jyefj00j0297|jyefj89j0394|jyefj90j0394|jyefj91j0394|jyefj95j0347|jyefj96j0322|jyefj96j0347|jyefj97j0322|jyefj97j0347|jyefj98j0322|jyefj98j0347|jyefj99j0320|jyefj99j0322|jyefj99j0323|jyefj99j0335|jyefj99j0336|jyefj99j0344|jyefj99j0347|jyefj99j0349|jyefj99j0351|jyeff00j0117|lyyfm01j0025|lyyfm01j0028|lyyfm01j0041|lyyfm01j0133|lyyfm01j0701|lyyfm02j0025|lyyfm02j0028|lyyfm02j0041|lyyfm02j0133|lyyfm02j0701|lyyfm03j0701|lyefj00j0775|lyefj00j0776|lyefj00j0777|lyefj00j0778|lyefj00j0779|lyefj00j0780|lyefj00j0781|lyefj00j0782|lyefj50j3807|lyefj50j3852|lyefj51j3807|lyefj51j3852|lyefj52j3807|lyefj52j3852|lyefj53j3807|lyefj53j3852|lyefj54j3807|lyefj54j3852|lyefj54j3886|lyefj55j3807|lyefj55j3852|lyefj55j3886|lyefj56j3807|lyefj56j3852|lyefj56j3886|lyefj57j3807|lyefj57j3852|lyefj57j3886|lyefj58j3807|lyefj58j3852|lyefj58j3886|lyefj59j3807|lyefj59j3852|lyefj59j3886|lyefj60j3807|lyefj60j3852|lyefj60j3886|lyefj61j3807|lyefj61j3852|lyefj61j3886|lyefj62j3807|lyefj62j3852|lyefj62j3886|lyefj63j3807|lyefj63j3852|lyefj63j3886|lyefj64j3807|lyefj64j3852|lyefj64j3886|lyefj65j3807|lyefj65j3852|lyefj65j3886|lyefj66j3807|lyefj66j3852|lyefj66j3886|lyefj67j3807|lyefj67j3852|lyefj67j3886|lyefj68j3807|lyefj68j3852|lyefj68j3886|lyefj69j3807|lyefj69j3846|lyefj69j3852|lyefj69j3886|lyefj70j3807|lyefj70j3846|lyefj70j3852|lyefj70j3886|lyefj71j3807|lyefj71j3846|lyefj71j3852|lyefj71j3886|lyefj72j3807|lyefj72j3846|lyefj72j3852|lyefj72j3886|lyefj73j3807|lyefj73j3846|lyefj73j3852|lyefj73j3886|lyefj74j3807|lyefj74j3846|lyefj74j3852|lyefj74j3886|lyefj75j3807|lyefj75j3808|lyefj75j3846|lyefj75j3852|lyefj75j3886|lyefj76j3732|lyefj76j3807|lyefj76j3808|lyefj76j3846|lyefj76j3852|lyefj76j3886|lyefj77j3732|lyefj77j3807|lyefj77j3808|lyefj77j3846|lyefj77j3852|lyefj77j3886|lyefj78j32
78|lyefj78j3732|lyefj78j3807|lyefj78j3808|lyefj78j3846|lyefj78j3852|lyefj78j3886|lyefj79j3732|lyefj79j3807|lyefj79j3808|lyefj79j3846|lyefj79j3852|lyefj79j3886|lyefj80j3732|lyefj80j3807|lyefj80j3808|lyefj80j3846|lyefj80j3852|lyefj80j3886|lyefj81j3732|lyefj81j3807|lyefj81j3808|lyefj81j3846|lyefj81j3852|lyefj81j3886|lyefj82j3732|lyefj82j3807|lyefj82j3808|lyefj82j3846|lyefj82j3852|lyefj82j3886|lyefj83j3732|lyefj83j3807|lyefj83j3808|lyefj83j3846|lyefj83j3852|lyefj83j3886|lyefj84j3732|lyefj84j3807|lyefj84j3808|lyefj84j3846|lyefj84j3852|lyefj84j3886|lyefj85j3732|lyefj85j3807|lyefj85j3808|lyefj85j3846|lyefj85j3852|lyefj85j3886|lyefj86j3278|lyefj86j3732|lyefj86j3807|lyefj86j3808|lyefj86j3846|lyefj86j3852|lyefj86j3886|lyefj87j3278|lyefj87j3732|lyefj87j3807|lyefj87j3808|lyefj87j3846|lyefj87j3852|lyefj87j3886|lyefj88j3732|lyefj88j3807|lyefj88j3808|lyefj88j3846|lyefj88j3852|lyefj88j3886|lyefj89j3732|lyefj89j3807|lyefj89j3808|lyefj89j3846|lyefj89j3852|lyefj89j3886|lyefj90j3732|lyefj90j3807|lyefj90j3808|lyefj90j3846|lyefj90j3852|lyefj90j3886|lyefj91j3732|lyefj91j3807|lyefj91j3808|lyefj91j3846|lyefj91j3852|lyefj91j3886|lyefj92j3732|lyefj92j3807|lyefj92j3808|lyefj92j3846|lyefj92j3852|lyefj92j3886|lyefj93j3732|lyefj93j3807|lyefj93j3808|lyefj93j3846|lyefj93j3852|lyefj93j3885|lyefj93j3886|lyefj94j3525|lyefj94j3732|lyefj94j3807|lyefj94j3808|lyefj94j3846|lyefj94j3852|lyefj94j3885|lyefj94j3886|lyefj95j3525|lyefj95j3732|lyefj95j3807|lyefj95j3808|lyefj95j3846|lyefj95j3852|lyefj95j3886|lyefj96j3732|lyefj96j3803|lyefj96j3807|lyefj96j3808|lyefj96j3846|lyefj96j3852|lyefj96j3886|lyefj97j3333|lyefj97j3732|lyefj97j3792|lyefj97j3803|lyefj97j3807|lyefj97j3808|lyefj97j3838|lyefj97j3843|lyefj97j3846|lyefj97j3852|lyefj97j3886|lyefj98j3083|lyefj98j3333|lyefj98j3732|lyefj98j3807|lyefj98j3808|lyefj98j3838|lyefj98j3843|lyefj98j3846|lyefj98j3852|lyefj98j3873|lyefj98j3877|lyefj98j3882|lyefj98j3886|lyefj99j2984|lyefj99j3083|lyefj99j3333|lyefj99j3732|lyefj99j3807|lyefj99j3808|lyefj99j3846|lyefj99j3849|lyefj99j
3852|lyefj99j3873|lyefj99j3877|lyefj99j3882|lyefj99j3884|lyefj99j3886|lyeff00j0106|lyeff00j0107|lyeff00j0108|lyeff00j0129|lyeff00j0130|lyeff00j0131|lyeff00j0132|lyeff00j0133|lyeff00j0134|lyeff00j0444|lyeff00j0445|lyeff91j0473|lyeff92j0473|lyeff92j3877|lyeff93j3877|lyeff94j0501|lyeff94j3525|lyeff94j3877|lyeff95j0501|lyeff95j3525|lyeff95j3877|lyeff96j0503|lyeff96j3877|lyeff97j3877|lyeff98j3333|lyeff98j3877|lyeff99j2984|lyeff99j3333|lyeff99j3877|mfyr9149ej|mfyr9149ek|mfyr9156ej|mfyr9156ek|mfyr9157ej|mfyr9157ek|mfyr9159ej|mfyr9159ek|mfyr9203ej|mfyr9204ej|mfyr9205ej|mfyr9206ej|mfyr9207ej|mfyr9207ek|mfyr9217ej|mfyr9217ek|mfyr9222ej|mfyr9222ek|mfyu0185ej|mfye9187ej|mfye9187ek|mfye9188ej|mfye9188ek|mfye9189ej|mfye9189ek|mfyf0185ej|oyefj87j0007|oyefj88j0007|oyefj89j0007|oyefj90j0007|oyefj91j0007|oyefj95j0001|oyefj96j0001|oyefj98j0004|oyefj99j0004|oyeff91j0004|oyeff92j0004|oyeff93j0004|oyeff94j0004|oyeff95j0004|oyeff96j0004|rklvyaxmany|ryefj93j0001|ryefj94j0001|tyyfj00a0001|tyyfj84j0005|tyyfj85j0005|tyyfj86j0005|tyyfj87j0005|tyyfj88j0005|tyyfj89j0005|tyyfj90j0005|tyyfj91j0005|tyyfj92j0005|tyyfj93j0005|tyyfj94j0005|tyyfj95j0005|tyyfj96j0005|tyyfj97j0005|tyyfj98j0005|tyyfj99j0005|tyefj50j0015|tyefj50j0017|tyefj50j0019|tyefj50j0020|tyefj50j0021|tyefj51j0015|tyefj51j0017|tyefj51j0019|tyefj51j0020|tyefj51j0021|tyefj52j0015|tyefj52j0017|tyefj52j0019|tyefj52j0020|tyefj52j0021|tyefj53j0015|tyefj53j0017|tyefj53j0019|tyefj53j0020|tyefj53j0021|tyefj54j0015|tyefj54j0017|tyefj54j0019|tyefj54j0020|tyefj54j0021|tyefj55j0015|tyefj55j0017|tyefj55j0019|tyefj55j0020|tyefj55j0021|tyefj56j0015|tyefj56j0017|tyefj56j0019|tyefj56j0020|tyefj56j0021|tyefj57j0015|tyefj57j0017|tyefj57j0019|tyefj57j0020|tyefj57j0021|tyefj58j0015|tyefj58j0017|tyefj58j0019|tyefj58j0020|tyefj58j0021|tyefj59j0015|tyefj59j0017|tyefj59j0019|tyefj59j0020|tyefj59j0021|tyefj60j0015|tyefj60j0017|tyefj60j0019|tyefj60j0020|tyefj60j0021|tyefj61j0015|tyefj61j0017|tyefj61j0019|tyefj61j0020|tyefj61j0021|tyefj62j0015|tyefj62j0017|tyefj62
j0019|tyefj62j0020|tyefj62j0021|tyefj63j0015|tyefj63j0017|tyefj63j0019|tyefj63j0020|tyefj63j0021|tyefj64j0015|tyefj64j0017|tyefj64j0019|tyefj64j0020|tyefj64j0021|tyefj65j0015|tyefj65j0017|tyefj65j0019|tyefj65j0020|tyefj65j0021|tyefj66j0015|tyefj66j0017|tyefj66j0019|tyefj66j0020|tyefj66j0021|tyefj67j0015|tyefj67j0017|tyefj67j0019|tyefj67j0020|tyefj67j0021|tyefj68j0015|tyefj68j0017|tyefj68j0019|tyefj68j0020|tyefj68j0021|tyefj69j0015|tyefj69j0017|tyefj69j0019|tyefj69j0020|tyefj69j0021|tyefj70j0015|tyefj70j0017|tyefj70j0019|tyefj70j0020|tyefj70j0021|tyefj71j0015|tyefj71j0017|tyefj71j0019|tyefj71j0020|tyefj71j0021|tyefj72j0015|tyefj72j0017|tyefj72j0019|tyefj72j0020|tyefj72j0021|tyefj72j0022|tyefj73j0015|tyefj73j0017|tyefj73j0019|tyefj73j0020|tyefj73j0021|tyefj73j0022|tyefj74j0015|tyefj74j0017|tyefj74j0019|tyefj74j0020|tyefj74j0021|tyefj74j0022|tyefj75j0015|tyefj75j0017|tyefj75j0019|tyefj75j0020|tyefj75j0021|tyefj75j0022|tyefj76j0015|tyefj76j0017|tyefj76j0019|tyefj76j0020|tyefj76j0021|tyefj76j0022|tyefj76j0119|tyefj77j0015|tyefj77j0017|tyefj77j0019|tyefj77j0020|tyefj77j0021|tyefj77j0022|tyefj77j0119|tyefj78j0015|tyefj78j0017|tyefj78j0019|tyefj78j0020|tyefj78j0021|tyefj78j0022|tyefj78j0119|tyefj79j0015|tyefj79j0017|tyefj79j0019|tyefj79j0020|tyefj79j0021|tyefj79j0022|tyefj79j0119|tyefj80j0015|tyefj80j0017|tyefj80j0019|tyefj80j0020|tyefj80j0021|tyefj80j0022|tyefj80j0114|tyefj80j0119|tyefj81j0015|tyefj81j0017|tyefj81j0019|tyefj81j0020|tyefj81j0021|tyefj81j0022|tyefj81j0114|tyefj81j0119|tyefj82j0015|tyefj82j0017|tyefj82j0019|tyefj82j0020|tyefj82j0021|tyefj82j0022|tyefj82j0119|tyefj83j0015|tyefj83j0017|tyefj83j0019|tyefj83j0020|tyefj83j0021|tyefj83j0022|tyefj83j0119|tyefj84j0014|tyefj84j0015|tyefj84j0017|tyefj84j0019|tyefj84j0020|tyefj84j0021|tyefj84j0022|tyefj84j0119|tyefj85j0014|tyefj85j0015|tyefj85j0017|tyefj85j0019|tyefj85j0020|tyefj85j0021|tyefj85j0022|tyefj85j0119|tyefj86j0014|tyefj86j0015|tyefj86j0017|tyefj86j0019|tyefj86j0020|tyefj86j0021|tyefj86j0022|tyefj87j0014|tyefj
87j0015|tyefj87j0017|tyefj87j0019|tyefj87j0020|tyefj87j0021|tyefj87j0022|tyefj88j0014|tyefj88j0015|tyefj88j0017|tyefj88j0019|tyefj88j0020|tyefj88j0021|tyefj88j0022|tyefj88j0100|tyefj88j0115|tyefj89j0003|tyefj89j0014|tyefj89j0015|tyefj89j0017|tyefj89j0019|tyefj89j0020|tyefj89j0021|tyefj89j0022|tyefj89j0100|tyefj89j0115|tyefj90j0014|tyefj90j0015|tyefj90j0016|tyefj90j0017|tyefj90j0018|tyefj90j0019|tyefj90j0020|tyefj90j0021|tyefj90j0022|tyefj90j0100|tyefj90j0111|tyefj90j0115|tyefj91j0014|tyefj91j0015|tyefj91j0016|tyefj91j0017|tyefj91j0018|tyefj91j0019|tyefj91j0020|tyefj91j0021|tyefj91j0022|tyefj91j0100|tyefj91j0111|tyefj91j0115|tyefj92j0014|tyefj92j0015|tyefj92j0016|tyefj92j0017|tyefj92j0018|tyefj92j0019|tyefj92j0020|tyefj92j0021|tyefj92j0022|tyefj92j0100|tyefj92j0105|tyefj92j0115|tyefj92j0121|tyefj93j0004|tyefj93j0014|tyefj93j0015|tyefj93j0017|tyefj93j0018|tyefj93j0019|tyefj93j0020|tyefj93j0021|tyefj93j0022|tyefj93j0100|tyefj93j0105|tyefj93j0115|tyefj93j0121|tyefj94j0002|tyefj94j0004|tyefj94j0008|tyefj94j0014|tyefj94j0015|tyefj94j0017|tyefj94j0019|tyefj94j0020|tyefj94j0021|tyefj94j0022|tyefj94j0084|tyefj94j0088|tyefj94j0100|tyefj94j0106|tyefj94j0116|tyefj94j0121|tyefj94j0123|tyefj95j0002|tyefj95j0004|tyefj95j0008|tyefj95j0014|tyefj95j0015|tyefj95j0017|tyefj95j0019|tyefj95j0020|tyefj95j0021|tyefj95j0022|tyefj95j0084|tyefj95j0088|tyefj95j0100|tyefj95j0101|tyefj95j0106|tyefj95j0112|tyefj95j0116|tyefj95j0121|tyefj95j0123|tyefj96j0014|tyefj96j0015|tyefj96j0017|tyefj96j0019|tyefj96j0020|tyefj96j0021|tyefj96j0022|tyefj96j0082|tyefj96j0084|tyefj96j0100|tyefj96j0101|tyefj96j0112|tyefj96j0117|tyefj96j0121|tyefj96j0124|tyefj97j0014|tyefj97j0015|tyefj97j0017|tyefj97j0019|tyefj97j0020|tyefj97j0021|tyefj97j0022|tyefj97j0081|tyefj97j0087|tyefj97j0098|tyefj97j0100|tyefj97j0107|tyefj97j0109|tyefj97j0113|tyefj97j0117|tyefj97j0118|tyefj97j0121|tyefj98j0003|tyefj98j0006|tyefj98j0014|tyefj98j0015|tyefj98j0017|tyefj98j0019|tyefj98j0020|tyefj98j0021|tyefj98j0022|tyefj98j0083|tyefj98j0085|tye
fj98j0086|tyefj98j0100|tyefj98j0104|tyefj98j0118|tyefj98j0121|tyefj99j0003|tyefj99j0006|tyefj99j0007|tyefj99j0014|tyefj99j0015|tyefj99j0017|tyefj99j0019|tyefj99j0020|tyefj99j0021|tyefj99j0022|tyefj99j0023|tyefj99j0100|tyefj99j0108|tyefj99j0110|tyefj99j0121|tyefj99j0125|tyeff94j0002|tyeff94j0008|tyeff94j0010|tyeff94j0011|tyeff94j0035|tyeff95j0002|tyeff95j0006|tyeff95j0008|tyeff95j0010|tyeff95j0011|tyeff95j0035|tyeff96j0003|tyeff96j0006|tyeff96j0009|tyeff96j0010|tyeff97j0004|tyeff97j0009|tyeff97j0116|tyeff98j0007|tyeff99j0007|tyeff99j0125|uyyfj00j0484|uyyfj00j0485|uyyfj00j0486|uyyfj00j0487|uyyfj00j0488|uyyfj00j0489|uyyfj00j0490|uyyfj00j0491|uyyfj00j0492|uyyfj00j0493|uyyfj00j0494|uyyfj00j0495|uyyfj00j0496|uyyfj00j0497|uyyfj00j0498|uyyfj00j0499|uyyfj00j0500|uyyfj00j0501|uyyfj00j0502|uyyfj00j0503|uyyfj00j0504|uyyfj00j0505|uyyfj00j0506|uyyfj00j0507|uyyfj00j0508|uyyfj00j0509|uyyfj00j0510|uyyfj00j0511|uyyfj00j0512|uyyfj00j0513|uyyfj00j0514|uyyfj00j0515|uyyfj00j0516|uyyfj00j0517|uyyfj00j0518|uyyfj00j0519|uyyfj00j0520|uyyfj00j0521|uyyfj00j0522|uyyfj00j0523|uyyfj00j0524|uyyfj00j0525|uyyfj00j0526|uyyfj00j0527|uyyfj00j0528|uyyfj00j0529|uyyfj00j0530|uyyfj00j0531|uyyfj00j0532|uyyfj00j0533|uyyfj00j0534|uyyfj00j0535|uyyfj00j0536|uyyfj00j0537|uyyfj00j0538|uyyfj00j0539|uyyfj00j0540|uyyfj00j0541|uyyfj00j0542|uyyfj00j0543|uyyfj00j0544|uyyfj00j0545|uyyfj00j0546|uyyfj00j0547|uyyfj00j0548|uyyfj00j0549|uyyfj00j0550|uyyfj00j0551|uyyfj00j0553|uyyfj00j0554|uyyfj00j0555|uyyfj00j0556|uyyfj00j0557|uyyfj00j0558|uyyfj00j0559|uyyfj00j0560|uyyfj00j0561|uyyfj00j0562|uyyfj00j0563|uyyfj00j0564|uyyfj00j0565|uyyfj00j0566|uyyfj00j0614|uyyfj00j0615|uyyfj00j0616|uyyfj00j0617|uyyfj00j0618|uyyfj00j0619|uyyfj00j0620|uyyfj00j0621|uyyfj00j0622|uyyfj00j0623|uyyfj00j0624|uyyfj00j0625|uyyfj00j0626|uyyfj00j0627|uyyfj00j0628|uyyfj00j0629|uyyfj00j0630|uyyfj00j0631|uyyfj00j0632|uyyfj00j0633|uyyfj00j0634|uyyfj00j0635|uyyfj00j0636|uyyfj00j0637|uyyfj00j0638|uyyfj00j0639|uyyfj00j0640|uyyfj00j0641|uyyfj00j0642|uyyfj00j0643|u
yyfj00j0644|uyyfj00j0645|uyyfj00j0646|uyyfj00j0647|uyyfj00j0648|uyyfj00j0649|uyyfj00j0650|uyyfj00j0651|uyyfj00j0652|uyyfj00j0653|uyyfj00j0654|uyyfj00j0655|uyyfj00j0656|uyyfj00j0657|uyyfj00j0658|uyyfj00j0659|uyyfj00j0660|uyyfj00j0661|uyyfj00j0662|uyyfj00j0663|uyyfj00j0664|uyyfj00j0665|uyyfj00j0666|uyyfj00j0667|uyyfj00j0668|uyyfj00j0669|uyyfj00j0670|uyyfj00j0671|uyyfj00j0672|uyyfj00j0673|uyyfj00j0674|uyyfj00j0675|uyyfj00j0676|uyyfj00j0677|uyyfj00j0678|uyyfj00j0679|uyyfj00j0680|uyyfj00j0681|uyyfj00j0682|uyyfj00j0683|uyyfj00j0684|uyyfj00j0685|uyyfj00j0686|uyyfj00j0687|uyyfj00j0688|uyyfj00j0689|uyyfj00j0690|uyyfj00j0691|uyyfj00j0692|uyyfj00j0693|uyyfj00j0694|uyyfj00j0695|uyyfj00j0696|uyyfj00j0697|uyyfj00j0698|uyyfj00j0699|uyyfj00j0700|uyyfj00j0701|uyyfj00j0702|uyyfj00j0703|uyyfj00j0704|uyyfj00j0705|uyyfj00j0706|uyyfj00j0707|uyyfj00j0708|uyyfj00j0709|uyyfj00j0710|uyyfj00j0711|uyyfj00j0712|uyyfj00j0713|uyyfj00j0714|uyyfj00j0715|uyyfj00j0716|uyyfj00j0717|uyyfj00j0718|uyyfj00j0719|uyyfj00j0720|uyyfj00j0721|uyyfj00j0722|uyyfj00j0723|uyyfj00j0724|uyyfj00j0725|uyyfj00j0726|uyyfj00j0727|uyyfj00j0728|uyyfj00j0729|uyyfj00j0730|uyyfj00j0731|uyyfj00j0732|uyyfj00j0733|uyyfj00j0734|uyyfj00j0735|uyyfj00j0736|uyyfj00j0737|uyyfj00j0738|uyyfj00j0739|uyyfj00j0740|uyyfj00j0741|uyyfj00j0742|uyyfj00j0743|uyyfj00j0744|uyyfj00j0745|uyyfj00j0746|uyyfj00j0747|uyyfj00j0748|uyyfj00j0749|uyyfj00j0750|uyyfj00j0751|uyyfj00j0752|uyyfj00j0753|uyyfj00j0754|uyyfj00j0755|uyyfj00j0756|uyyfj00j0757|uyyfj00j0758|uyyfj00j0759|uyyfj00j0760|uyyfj00j0761|uyyfj00j0762|uyyfj00j0763|uyyfj00j0764|uyyfj00j0765|uyyfj00j0766|uyyfj00j0767|uyyfj00j0768|uyyfj00j0769|uyyfj00j0770|uyyfj00j0771|uyyfj00j0772|uyyfj00j0773|uyyfj00j0774|uyyfj00j0775|uyyfj00j0776|uyyfj00j0777|uyyfj00j0778|uyyfj00j0779|uyyfj00j0780|uyyfj00j0781|uyyfj00j0782|uyyff00j0011|uyyff00j0031|uyyff00j0032|uyyff00j0033|uyyff00j0034|uyyff99j0012|uyefj00j0071|uyefj00j0455|uyefj00j0456|uyefj00j0582|uyefj00j0583|uyefj00j0584|uyefj00j0585|uyefj00j0586|uyefj00j0590
|uyeff00j0188|xyrly-f-jyy-y01|xyrly-f-jyy-y02|xyrly-f-jyy-y03|xyrly-f-jyy-y04|xyrly-f-jyy-y05|xyrly-f-jyy-y06|xyrly-f-jyy-y07|xyrly-f-jyy-y08|xyrly-f-jyy-y09|xyrly-f-jyy-y10|xyrly-f-jyy-y11|xyrly-f-jyy-y12|xyrly-f-jyy-y13|xyrly-f-jyy-y14|xyrly-f-jyy-y15|xyrly-f-jyy-y16|xyrly-f-url-y01|xyrly-f-url-y02|yyefj97j0005|ybyfcy4000|ybyfcy4001|ayefj99j0035|by-b-y-bzu-l01|by-b-y-bzu-l02|by-b-e-079|by-b-e-080|by-b-e-082|by-b-e-083|byefj72j0002|byefj73j0002|byefj74j0002|byefj75j0002|byefj76j0002|byefj77j0002|byefj78j0002|byefj79j0002|byefj91j0007|byefj92j0007|byefj98j0003|byefj99j0003|byefj99j0005|byefj99j0006|byeff88j0002|byeff89j0002|byeff90j0002|byeff91j0002|byeff92j0002|byeff93j0002|byeff96j0003|byeff97j0003|byeff98j0003|byeff99j0003|fymfj98j0001|fymfj99j0001|fyyaj98k0297|fyyaj99k0297|fyyfj00j0109|fyyfj00j0110|fyyfj00j0122|fyyfj00j0123|fyyfj00j0201|fyyfj00j0202|fyyfj00j0207|fyyfj00j0208|fyyfj00j0227|fyyfj00j0228|fyyfj00j0229|fyyfj00j0230|fyyfj00j0231|fyyfj00j0232|fyyfj00j0233|fyyfj00j0234|fyyfj00j0235|fyyfj00j0236|fyyfj00j0237|fyyfj00j0238|fyyfj00j0239|fyyfj00j0240|fyyfj00j0241|fyyfj00j0242|fyyfj00j0243|fyyfj00j0244|fyyfj00j0245|fyyfj00j0246|fyyfj00j0247|fyyfj00j0248|fyyfj00j0249|fyyfj00j0250|fyyfj00j0251|fyyfj00j0252|fyyfj00j0253|fyyfj00j0254|fyyfj00j0255|fyyfj00j0256|fyyfj00j0257|fyyfj00j0258|fyyfj00j0259|fyyfj00j0260|fyyfj00j0261|fyyfj00j0262|fyyfj00j0263|fyyfj00j0264|fyyfj00j0265|fyyfj00j0266|fyyfj00j0267|fyyfj00j0268|fyyfj00j0290|fyyfj00j0291|fyyfj00j0292|fyyfj00j0293|fyyfj00j0294|fyyfj00j0295|fyyfj00j0296|fyyfj00j0297|fyyfj00j0298|fyyfj00j0299|fyyfj00j0300|fyyfj00j0301|fyyfj00j0302|fyyfj00j0303|fyyfj00j0304|fyyfj00j0305|fyyfj00j0306|fyyfj00j0307|fyyfj00j0308|fyyfj00j0309|fyyfj00j0310|fyyfj00j0311|fyyfj00j0312|fyyfj00j0313|fyyfj00j0314|fyyfj00j0315|fyyfj00j0316|fyyfj00j0317|fyyfj00j0318|fyyfj00j0319|fyyfj00j0320|fyyfj00j0321|fyyfj00j0322|fyyfj00j0323|fyyfj00j0324|fyyfj00j0325|fyyfj00j0326|fyyfj00j0327|fyyfj00j0328|fyyfj00j0329|fyyfj00j0330|fyyfj00j0331|fyyfj00j0332|fyy
fj00j0333|fyyfj00j0334|fyyfj00j0335|fyyfj00j0340|fyyfj00j0341|fyyfj00j0342|fyyfj00j0343|fyyfj00j0344|fyyfj00j0345|fyyfj00j0346|fyyfj00j0347|fyyfj00j0348|fyyfj00j0349|fyyfj00j0367|fyyfj00j0368|fyyfj00j0369|fyyfj00j0370|fyyfj00j0371|fyyfj00j0372|fyyfj00j0373|fyyfj00j0374|fyyfj00j0375|fyyfj00j0376|fyyfj00j0377|fyyfj00j0378|fyyfj00j0379|fyyfj00j0380|fyyfj00j0381|fyyfj00j0382|fyyfj00j0383|fyyfj00j0384|fyyfj00j0385|fyyfj00j0386|fyyfj00j0387|fyyfj00j0388|fyyfj00j0415|fyyfj00j0416|fyyfj00j0417|fyyfj00j0418|fyyfj00j0419|fyyfj00j0420|fyyfj00j0421|fyyfj00j0422|fyyfj00j0423|fyyfj00j0424|fyyfj00j0425|fyyfj00j0426|fyyfj00j0427|fyyfj00j0428|fyyfj00j0429|fyyfj00j0430|fyyfj00j0431|fyyfj00j0432|fyyfj00j0433|fyyfj00j0434|fyyfj00j0435|fyyfj00j0436|fyyfj00j0437|fyyfj00j0438|fyyfj00j0439|fyyfj00j0440|fyyfj00j0441|fyyfj00j0446|fyyfj00j0447|fyyfj00j0448|fyyfj00j0449|fyyfj00j0451|fyyfj00j0452|fyyfj00j0453|fyyfj00j0454|fyyfj00j0455|fyyfj00j0456|fyyfj00j0457|fyyfj00j0459|fyyfj00j0460|fyyfj00j0461|fyyfj00j0462|fyyfj00j0463|fyyfj00j0464|fyyfj00j0465|fyyfj00j0466|fyyfj00j0467|fyyfj00j0468|fyyfj00j0469|fyyfj00j0470|fyyfj00j0471|fyyfj00j0474|fyyfj00j0475|fyyfj00j0476|fyyfj00j0477|fyyfj00j0478|fyyfj00j0479|fyyfj00j0480|fyyfj00j0481|fyyfj00j0482|fyyfj00j0483|fyyfj00j0484|fyyfj00j0485|fyyfj00j0486|fyyfj00j0487|fyyfj00j0488|fyyfj00j0489|fyyfj00j0490|fyyfj00j0491|fyyfj00j0492|fyyfj00j0493|fyyfj00j0494|fyyfj00j0495|fyyfj00j0496|fyyfj00j0497|fyyfj00j0498|fyyfj00j0499|fyyfj00j0500|fyyfj00j0501|fyyfj00j0502|fyyfj00j0503|fyyfj00j0504|fyyfj00j0505|fyyfj00j0506|fyyfj00j0507|fyyfj00j0508|fyyfj00j0509|fyyfj00j0510|fyyfj00j0511|fyyfj00j0512|fyyfj00j0513|fyyfj00j0514|fyyfj00j0515|fyyfj00j0516|fyyfj00j0517|fyyfj00j0518|fyyfj00j0521|fyyfj00j0522|fyyfj00j0523|fyyfj00j0524|fyyfj00j0526|fyyfj00j0527|fyyfj00j0528|fyyfj00j0529|fyyfj00j0530|fyyfj00j0531|fyyfj00j0532|fyyfj00j0533|fyyfj00j0534|fyyfj00j0535|fyyfj00j0536|fyyfj00j0537|fyyfj00j0538|fyyfj00j0539|fyyfj00j0540|fyyfj00j0541|fyyfj00j0542|fyyfj00j0543|fyyfj00j0544|f
yyfj00j0545|fyyfj00j0546|fyyfj00j0564|fyyfj00j0565|fyyfj00j0566|fyyfj00j0567|fyyfj00j0568|fyyfj00j0569|fyyfj00j0570|fyyfj00j0571|fyyfj00j0572|fyyfj00j0574|fyyfj00j0575|fyyfj00j0576|fyyfj00j0577|fyyfj00j0578|fyyfj00j0579|fyyfj00j0580|fyyfj01j0473|fyyfj02j0473|fyyfj36j0289|fyyfj37j0209|fyyfj37j0289|fyyfj38j0209|fyyfj38j0289|fyyfj39j0209|fyyfj39j0289|fyyfj40j0209|fyyfj40j0289|fyyfj41j0209|fyyfj41j0289|fyyfj42j0209|fyyfj42j0289|fyyfj43j0209|fyyfj43j0289|fyyfj44j0209|fyyfj44j0289|fyyfj45j0104|fyyfj45j0209|fyyfj45j0289|fyyfj46j0104|fyyfj46j0209|fyyfj46j0289|fyyfj47j0104|fyyfj47j0209|fyyfj47j0289|fyyfj48j0104|fyyfj48j0209|fyyfj48j0289|fyyfj49j0104|fyyfj49j0209|fyyfj49j0289|fyyfj50j0104|fyyfj50j0209|fyyfj50j0289|fyyfj50j0500|fyyfj51j0104|fyyfj51j0209|fyyfj51j0289|fyyfj51j0500|fyyfj52j0104|fyyfj52j0209|fyyfj52j0289|fyyfj52j0500|fyyfj53j0104|fyyfj53j0209|fyyfj53j0289|fyyfj53j0500|fyyfj54j0104|fyyfj54j0209|fyyfj54j0289|fyyfj54j0500|fyyfj55j0104|fyyfj55j0209|fyyfj55j0289|fyyfj55j0500|fyyfj56j0104|fyyfj56j0209|fyyfj56j0289|fyyfj56j0500|fyyfj57j0104|fyyfj57j0209|fyyfj57j0289|fyyfj57j0500|fyyfj58j0104|fyyfj58j0209|fyyfj58j0289|fyyfj58j0500|fyyfj59j0104|fyyfj59j0209|fyyfj59j0289|fyyfj59j0500|fyyfj60j0104|fyyfj60j0209|fyyfj60j0289|fyyfj60j0500|fyyfj61j0104|fyyfj61j0209|fyyfj61j0289|fyyfj61j0500|fyyfj62j0104|fyyfj62j0209|fyyfj62j0289|fyyfj62j0500|fyyfj63j0104|fyyfj63j0209|fyyfj63j0289|fyyfj63j0500|fyyfj64j0104|fyyfj64j0107|fyyfj64j0209|fyyfj64j0289|fyyfj64j0500|fyyfj64j0573|fyyfj65j0104|fyyfj65j0107|fyyfj65j0209|fyyfj65j0289|fyyfj65j0500|fyyfj65j0573|fyyfj66j0104|fyyfj66j0107|fyyfj66j0209|fyyfj66j0289|fyyfj66j0500|fyyfj66j0573|fyyfj67j0104|fyyfj67j0107|fyyfj67j0209|fyyfj67j0289|fyyfj67j0500|fyyfj67j0573|fyyfj68j0104|fyyfj68j0107|fyyfj68j0209|fyyfj68j0289|fyyfj68j0500|fyyfj68j0573|fyyfj69j0104|fyyfj69j0107|fyyfj69j0209|fyyfj69j0289|fyyfj69j0500|fyyfj69j0573|fyyfj70j0104|fyyfj70j0107|fyyfj70j0209|fyyfj70j0289|fyyfj70j0472|fyyfj70j0500|fyyfj70j0573|fyyfj71j0104|fyyfj71j0107|fyyfj71j0209
|fyyfj71j0289|fyyfj71j0472|fyyfj71j0500|fyyfj71j0573|fyyfj72j0104|fyyfj72j0107|fyyfj72j0209|fyyfj72j0289|fyyfj72j0472|fyyfj72j0500|fyyfj72j0573|fyyfj73j0104|fyyfj73j0107|fyyfj73j0209|fyyfj73j0289|fyyfj73j0472|fyyfj73j0500|fyyfj73j0573|fyyfj74j0104|fyyfj74j0107|fyyfj74j0209|fyyfj74j0289|fyyfj74j0472|fyyfj74j0500|fyyfj74j0573|fyyfj75j0104|fyyfj75j0107|fyyfj75j0108|fyyfj75j0209|fyyfj75j0289|fyyfj75j0472|fyyfj75j0500|fyyfj75j0573|fyyfj76j0104|fyyfj76j0107|fyyfj76j0108|fyyfj76j0209|fyyfj76j0289|fyyfj76j0472|fyyfj76j0500|fyyfj76j0573|fyyfj77j0104|fyyfj77j0107|fyyfj77j0108|fyyfj77j0209|fyyfj77j0289|fyyfj77j0472|fyyfj77j0500|fyyfj77j0573|fyyfj78j0104|fyyfj78j0107|fyyfj78j0108|fyyfj78j0209|fyyfj78j0289|fyyfj78j0472|fyyfj78j0500|fyyfj78j0573|fyyfj79j0104|fyyfj79j0107|fyyfj79j0108|fyyfj79j0209|fyyfj79j0289|fyyfj79j0339|fyyfj79j0472|fyyfj79j0500|fyyfj79j0573|fyyfj80j0104|fyyfj80j0107|fyyfj80j0108|fyyfj80j0209|fyyfj80j0289|fyyfj80j0339|fyyfj80j0352|fyyfj80j0472|fyyfj80j0500|fyyfj80j0573|fyyfj81j0104|fyyfj81j0107|fyyfj81j0108|fyyfj81j0209|fyyfj81j0289|fyyfj81j0339|fyyfj81j0352|fyyfj81j0472|fyyfj81j0500|fyyfj81j0573|fyyfj82j0104|fyyfj82j0107|fyyfj82j0108|fyyfj82j0209|fyyfj82j0289|fyyfj82j0339|fyyfj82j0352|fyyfj82j0472|fyyfj82j0500|fyyfj82j0573|fyyfj83j0104|fyyfj83j0107|fyyfj83j0108|fyyfj83j0209|fyyfj83j0289|fyyfj83j0339|fyyfj83j0352|fyyfj83j0472|fyyfj83j0500|fyyfj83j0573|fyyfj84j0104|fyyfj84j0107|fyyfj84j0108|fyyfj84j0209|fyyfj84j0289|fyyfj84j0339|fyyfj84j0352|fyyfj84j0472|fyyfj84j0500|fyyfj84j0573|fyyfj85j0104|fyyfj85j0107|fyyfj85j0108|fyyfj85j0209|fyyfj85j0289|fyyfj85j0301|fyyfj85j0339|fyyfj85j0352|fyyfj85j0472|fyyfj85j0500|fyyfj85j0573|fyyfj86j0104|fyyfj86j0107|fyyfj86j0108|fyyfj86j0209|fyyfj86j0289|fyyfj86j0301|fyyfj86j0339|fyyfj86j0352|fyyfj86j0472|fyyfj86j0500|fyyfj86j0573|fyyfj87j0067|fyyfj87j0104|fyyfj87j0107|fyyfj87j0108|fyyfj87j0209|fyyfj87j0289|fyyfj87j0301|fyyfj87j0339|fyyfj87j0352|fyyfj87j0472|fyyfj87j0500|fyyfj87j0573|fyyfj88j0067|fyyfj88j0104|fyyfj88j0107|fyyfj88j01
08|fyyfj88j0209|fyyfj88j0289|fyyfj88j0301|fyyfj88j0339|fyyfj88j0352|fyyfj88j0472|fyyfj88j0500|fyyfj88j0573|fyyfj89j0067|fyyfj89j0104|fyyfj89j0107|fyyfj89j0108|fyyfj89j0209|fyyfj89j0289|fyyfj89j0301|fyyfj89j0339|fyyfj89j0352|fyyfj89j0358|fyyfj89j0472|fyyfj89j0500|fyyfj89j0573|fyyfj90j0067|fyyfj90j0104|fyyfj90j0107|fyyfj90j0108|fyyfj90j0209|fyyfj90j0289|fyyfj90j0301|fyyfj90j0321|fyyfj90j0339|fyyfj90j0352|fyyfj90j0358|fyyfj90j0452|fyyfj90j0472|fyyfj90j0500|fyyfj90j0573|fyyfj91j0067|fyyfj91j0104|fyyfj91j0107|fyyfj91j0108|fyyfj91j0209|fyyfj91j0289|fyyfj91j0301|fyyfj91j0321|fyyfj91j0339|fyyfj91j0352|fyyfj91j0358|fyyfj91j0452|fyyfj91j0472|fyyfj91j0500|fyyfj91j0573|fyyfj92j0067|fyyfj92j0104|fyyfj92j0107|fyyfj92j0108|fyyfj92j0209|fyyfj92j0289|fyyfj92j0301|fyyfj92j0321|fyyfj92j0339|fyyfj92j0352|fyyfj92j0358|fyyfj92j0452|fyyfj92j0472|fyyfj92j0500|fyyfj92j0573|fyyfj93j0067|fyyfj93j0099|fyyfj93j0104|fyyfj93j0107|fyyfj93j0108|fyyfj93j0209|fyyfj93j0289|fyyfj93j0301|fyyfj93j0321|fyyfj93j0352|fyyfj93j0358|fyyfj93j0452|fyyfj93j0472|fyyfj93j0500|fyyfj93j0573|fyyfj94j0067|fyyfj94j0099|fyyfj94j0104|fyyfj94j0107|fyyfj94j0108|fyyfj94j0209|fyyfj94j0211|fyyfj94j0289|fyyfj94j0301|fyyfj94j0321|fyyfj94j0352|fyyfj94j0358|fyyfj94j0359|fyyfj94j0452|fyyfj94j0472|fyyfj94j0500|fyyfj94j0573|fyyfj95j0067|fyyfj95j0099|fyyfj95j0104|fyyfj95j0107|fyyfj95j0108|fyyfj95j0209|fyyfj95j0211|fyyfj95j0289|fyyfj95j0298|fyyfj95j0301|fyyfj95j0321|fyyfj95j0339|fyyfj95j0352|fyyfj95j0358|fyyfj95j0359|fyyfj95j0414|fyyfj95j0452|fyyfj95j0472|fyyfj95j0500|fyyfj95j0573|fyyfj96j0067|fyyfj96j0099|fyyfj96j0104|fyyfj96j0107|fyyfj96j0108|fyyfj96j0209|fyyfj96j0211|fyyfj96j0289|fyyfj96j0298|fyyfj96j0301|fyyfj96j0321|fyyfj96j0339|fyyfj96j0352|fyyfj96j0358|fyyfj96j0359|fyyfj96j0414|fyyfj96j0452|fyyfj96j0472|fyyfj96j0500|fyyfj96j0573|fyyfj97j0067|fyyfj97j0099|fyyfj97j0100|fyyfj97j0104|fyyfj97j0107|fyyfj97j0108|fyyfj97j0209|fyyfj97j0211|fyyfj97j0289|fyyfj97j0298|fyyfj97j0301|fyyfj97j0321|fyyfj97j0339|fyyfj97j0352|fyyfj97j0358|fyyfj97j
0359|fyyfj97j0414|fyyfj97j0445|fyyfj97j0452|fyyfj97j0472|fyyfj97j0500|fyyfj97j0573|fyyfj98j0067|fyyfj98j0099|fyyfj98j0100|fyyfj98j0104|fyyfj98j0107|fyyfj98j0108|fyyfj98j0178|fyyfj98j0209|fyyfj98j0211|fyyfj98j0289|fyyfj98j0298|fyyfj98j0301|fyyfj98j0303|fyyfj98j0321|fyyfj98j0339|fyyfj98j0352|fyyfj98j0358|fyyfj98j0359|fyyfj98j0413|fyyfj98j0414|fyyfj98j0445|fyyfj98j0452|fyyfj98j0472|fyyfj98j0500|fyyfj98j0573|fyyfj99j0067|fyyfj99j0099|fyyfj99j0100|fyyfj99j0104|fyyfj99j0107|fyyfj99j0108|fyyfj99j0131|fyyfj99j0209|fyyfj99j0211|fyyfj99j0285|fyyfj99j0289|fyyfj99j0298|fyyfj99j0301|fyyfj99j0303|fyyfj99j0321|fyyfj99j0339|fyyfj99j0352|fyyfj99j0358|fyyfj99j0359|fyyfj99j0413|fyyfj99j0414|fyyfj99j0445|fyyfj99j0452|fyyfj99j0472|fyyfj99j0500|fyyfj99j0573|fyyfm01j0064|fyyfm01j0070|fyyfm01j0071|fyyfm01j0088|fyyfm01j0091|fyyfm01j0108|fyyfm01j0111|fyyfm01j0112|fyyfm01j0114|fyyfm01j0115|fyyfm01j0133|fyyfm01j0140|fyyfm01j0141|fyyfm01j0142|fyyfm01j0143|fyyfm01j0148|fyyfm01j0149|fyyfm01j0152|fyyfm01j0153|fyyfm01j0155|fyyfm01j0159|fyyfm01j0160|fyyfm01j0163|fyyfm01j0165|fyyfm01j0168|fyyfm01j0169|fyyfm01j0221|fyyfm01j0223|fyyfm01j0268|fyyfm01j0271|fyyfm01j0285|fyyfm01j0299|fyyfm01j0320|fyyfm01j0321|fyyfm01j0360|fyyfm01j0369|fyyfm01j0400|fyyfm01j0401|fyyfm01j0411|fyyfm01j0572|fyyfm01j0765|fyyfm02j0064|fyyfm02j0069|fyyfm02j0070|fyyfm02j0071|fyyfm02j0088|fyyfm02j0091|fyyfm02j0108|fyyfm02j0111|fyyfm02j0112|fyyfm02j0114|fyyfm02j0115|fyyfm02j0133|fyyfm02j0140|fyyfm02j0141|fyyfm02j0142|fyyfm02j0143|fyyfm02j0148|fyyfm02j0149|fyyfm02j0152|fyyfm02j0153|fyyfm02j0155|fyyfm02j0159|fyyfm02j0160|fyyfm02j0163|fyyfm02j0165|fyyfm02j0168|fyyfm02j0169|fyyfm02j0221|fyyfm02j0223|fyyfm02j0268|fyyfm02j0271|fyyfm02j0285|fyyfm02j0299|fyyfm02j0320|fyyfm02j0321|fyyfm02j0360|fyyfm02j0369|fyyfm02j0400|fyyfm02j0572|fyyfm02j0765|fyyfm03j0064|fyyfm03j0070|fyyfm03j0091|fyyfm03j0108|fyyfm03j0111|fyyfm03j0115|fyyfm03j0160|fyyfm03j0165|fyyfm03j0299|fyyfm03j0400|fyyfm03j0572|fyyfm04j0111|fyyfm51j0064|fyyfm51j0369|fyyfm52j0064|fyyfm5
2j0369|fyyfr88j0003|fyyfr89j0003|fyyff98j0071|fyyff98j0303|fyyff99j0029|fyyff99j0303|fyefj00j0112|fyefj00j0545|fyefj00j0546|fyefj00j0633|fyefj00j0634|fyefj00j0635|fyefj00j0636|fyefj00j0637|fyefj00j0649|fyefj00j0651|fyefj00j0652|fyefj00j0656|fyefj00j0657|fyefj00j0658|fyefj00j0659|fyefj00j0660|fyefj00j0685|fyefj00j0686|fyefj00j0688|fyefj00j0701|fyefj00j0702|fyefj00j0703|fyefj00j0715|fyefj00j0720|fyefj00j0721|fyefj00j0722|fyefj00j0724|fyefj00j0725|fyefj00j0726|fyefj00j0731|fyefj00j0751|fyefj00j0752|fyefj00j0756|fyefj00j0757|fyefj00j0758|fyefj00j0759|fyefj00j0761|fyefj00j0762|fyefj00j0763|fyefj00j0764|fyefj00j0768|fyefj00j0769|fyefj00j0785|fyefj00j0786|fyefj00j0789|fyefj00j0790|fyefj00j0793|fyefj00j0794|fyefj00j0803|fyefj00j0811|fyefj00j0821|fyefj00j0822|fyefj00j0823|fyefj00j0824|fyefj00j0825|fyefj00j0826|fyefj00j0827|fyefj00j0828|fyefj00j0829|fyefj00j0831|fyefj00j0832|fyefj00j0833|fyefj00j0838|fyefj00j0839|fyefj00j0840|fyefj00j0854|fyefj00j0855|fyefj00j0856|fyefj00j0859|fyefj00j0860|fyefj00j0861|fyefj00j0869|fyefj00j0870|fyefj00j0879|fyefj00j0887|fyefj00j0888|fyefj00j0889|fyefj00j0900|fyefj00j0901|fyefj00j0903|fyefj00j0904|fyefj00j0905|fyefj00j0959|fyefj00j0960|fyefj00j0961|fyefj00j1004|fyefj00j1005|fyefj00j1012|fyefj00j1013|fyefj00j1014|fyefj00j1015|fyefj00j1016|fyefj00j1017|fyefj00j1018|fyefj00j1019|fyefj00j1020|fyefj00j1021|fyefj00j1218|fyefj00j1219|fyefj00j1220|fyefj00j1221|fyefj00j1222|fyefj00j1811|fyefj00j1854|fyefj00j1855|fyefj00j1856|fyefj01j0707|fyefj02j0707|fyefj03j0707|fyefj66j0001|fyefj67j0001|fyefj68j0001|fyefj68j1064|fyefj69j0001|fyefj69j1064|fyefj70j0001|fyefj70j0859|fyefj70j1064|fyefj71j0001|fyefj71j1064|fyefj72j0001|fyefj72j1064|fyefj73j0001|fyefj73j1064|fyefj74j0001|fyefj74j1064|fyefj75j0001|fyefj75j1064|fyefj75j1092|fyefj76j0001|fyefj76j1064|fyefj76j1092|fyefj77j0001|fyefj77j1064|fyefj77j1092|fyefj78j0001|fyefj78j1064|fyefj78j1092|fyefj79j0001|fyefj79j1064|fyefj79j1092|fyefj80j0001|fyefj80j0859|fyefj80j1064|fyefj80j1077|fyefj80j1092|fyefj81j0001|fyef
j81j1064|fyefj81j1077|fyefj81j1092|fyefj82j0001|fyefj82j1064|fyefj82j1092|fyefj83j0001|fyefj83j1064|fyefj83j1092|fyefj84j0001|fyefj84j1064|fyefj84j1092|fyefj85j0001|fyefj85j0356|fyefj85j1064|fyefj85j1092|fyefj86j0001|fyefj86j0356|fyefj86j1064|fyefj87j0001|fyefj87j0356|fyefj87j1064|fyefj88j0001|fyefj88j0356|fyefj88j1064|fyefj89j0001|fyefj89j0356|fyefj89j1064|fyefj89j1067|fyefj90j0001|fyefj90j0758|fyefj90j1021|fyefj90j1064|fyefj90j1067|fyefj91j0001|fyefj91j0758|fyefj91j0791|fyefj91j1021|fyefj91j1064|fyefj91j1067|fyefj91j1077|fyefj92j0001|fyefj92j0359|fyefj92j0678|fyefj92j0758|fyefj92j0791|fyefj92j0867|fyefj92j1021|fyefj92j1064|fyefj92j1077|fyefj93j0001|fyefj93j0359|fyefj93j0678|fyefj93j0758|fyefj93j0791|fyefj93j0867|fyefj93j1010|fyefj93j1021|fyefj93j1049|fyefj93j1064|fyefj93j1077|fyefj94j0001|fyefj94j0678|fyefj94j0758|fyefj94j0791|fyefj94j0867|fyefj94j1010|fyefj94j1021|fyefj94j1049|fyefj94j1064|fyefj94j1070|fyefj94j1077|fyefj94j1085|fyefj95j0001|fyefj95j0678|fyefj95j0758|fyefj95j0791|fyefj95j0867|fyefj95j0965|fyefj95j0966|fyefj95j1010|fyefj95j1011|fyefj95j1021|fyefj95j1055|fyefj95j1064|fyefj95j1069|fyefj95j1077|fyefj95j1085|fyefj95j1089|fyefj96j0001|fyefj96j0106|fyefj96j0671|fyefj96j0678|fyefj96j0758|fyefj96j0791|fyefj96j0814|fyefj96j0836|fyefj96j0867|fyefj96j0931|fyefj96j0965|fyefj96j0966|fyefj96j0976|fyefj96j1010|fyefj96j1021|fyefj96j1051|fyefj96j1055|fyefj96j1064|fyefj96j1068|fyefj96j1070|fyefj96j1077|fyefj96j1079|fyefj96j1081|fyefj96j1086|fyefj96j1088|fyefj96j1091|fyefj96j1093|fyefj96j1094|fyefj97j0001|fyefj97j0106|fyefj97j0584|fyefj97j0586|fyefj97j0671|fyefj97j0678|fyefj97j0758|fyefj97j0791|fyefj97j0814|fyefj97j0825|fyefj97j0836|fyefj97j0863|fyefj97j0865|fyefj97j0867|fyefj97j0914|fyefj97j0931|fyefj97j0952|fyefj97j0965|fyefj97j0966|fyefj97j0969|fyefj97j0971|fyefj97j0972|fyefj97j0976|fyefj97j0985|fyefj97j1010|fyefj97j1021|fyefj97j1051|fyefj97j1052|fyefj97j1055|fyefj97j1058|fyefj97j1059|fyefj97j1064|fyefj97j1068|fyefj97j1077|fyefj97j1079|fyefj97j1081|fyefj97j1086|fy
efj97j1088|fyefj97j1095|fyefj98j0001|fyefj98j0243|fyefj98j0326|fyefj98j0329|fyefj98j0343|fyefj98j0344|fyefj98j0380|fyefj98j0472|fyefj98j0584|fyefj98j0586|fyefj98j0604|fyefj98j0671|fyefj98j0673|fyefj98j0676|fyefj98j0677|fyefj98j0678|fyefj98j0694|fyefj98j0758|fyefj98j0814|fyefj98j0825|fyefj98j0836|fyefj98j0863|fyefj98j0865|fyefj98j0867|fyefj98j0896|fyefj98j0898|fyefj98j0901|fyefj98j0906|fyefj98j0910|fyefj98j0913|fyefj98j0914|fyefj98j0922|fyefj98j0931|fyefj98j0934|fyefj98j0936|fyefj98j0951|fyefj98j0952|fyefj98j0963|fyefj98j0965|fyefj98j0966|fyefj98j0969|fyefj98j0971|fyefj98j0972|fyefj98j0974|fyefj98j0975|fyefj98j0976|fyefj98j0977|fyefj98j0978|fyefj98j0985|fyefj98j0992|fyefj98j1008|fyefj98j1009|fyefj98j1010|fyefj98j1011|fyefj98j1012|fyefj98j1019|fyefj98j1021|fyefj98j1028|fyefj98j1034|fyefj98j1039|fyefj98j1046|fyefj98j1047|fyefj98j1048|fyefj98j1054|fyefj98j1055|fyefj98j1064|fyefj98j1068|fyefj98j1077|fyefj98j1079|fyefj98j1080|fyefj98j1081|fyefj98j1082|fyefj98j1084|fyefj98j1087|fyefj98j1088|fyefj98j1090|fyefj99j0010|fyefj99j0188|fyefj99j0243|fyefj99j0268|fyefj99j0280|fyefj99j0301|fyefj99j0329|fyefj99j0343|fyefj99j0344|fyefj99j0380|fyefj99j0552|fyefj99j0573|fyefj99j0584|fyefj99j0586|fyefj99j0604|fyefj99j0671|fyefj99j0673|fyefj99j0676|fyefj99j0677|fyefj99j0678|fyefj99j0694|fyefj99j0722|fyefj99j0757|fyefj99j0758|fyefj99j0771|fyefj99j0772|fyefj99j0804|fyefj99j0806|fyefj99j0809|fyefj99j0814|fyefj99j0825|fyefj99j0836|fyefj99j0862|fyefj99j0863|fyefj99j0865|fyefj99j0866|fyefj99j0867|fyefj99j0875|fyefj99j0896|fyefj99j0898|fyefj99j0901|fyefj99j0906|fyefj99j0907|fyefj99j0908|fyefj99j0910|fyefj99j0912|fyefj99j0913|fyefj99j0914|fyefj99j0921|fyefj99j0922|fyefj99j0923|fyefj99j0931|fyefj99j0934|fyefj99j0936|fyefj99j0937|fyefj99j0949|fyefj99j0951|fyefj99j0952|fyefj99j0962|fyefj99j0963|fyefj99j0965|fyefj99j0966|fyefj99j0969|fyefj99j0971|fyefj99j0972|fyefj99j0974|fyefj99j0975|fyefj99j0976|fyefj99j0977|fyefj99j0978|fyefj99j0982|fyefj99j0985|fyefj99j0986|fyefj99j0988|fyefj99j0991|fyefj99j0992|
fyefj99j0995|fyefj99j0997|fyefj99j0999|fyefj99j1003|fyefj99j1006|fyefj99j1008|fyefj99j1009|fyefj99j1010|fyefj99j1011|fyefj99j1016|fyefj99j1019|fyefj99j1020|fyefj99j1021|fyefj99j1024|fyefj99j1026|fyefj99j1028|fyefj99j1031|fyefj99j1033|fyefj99j1034|fyefj99j1036|fyefj99j1039|fyefj99j1042|fyefj99j1045|fyefj99j1046|fyefj99j1048|fyefj99j1053|fyefj99j1054|fyefj99j1055|fyefj99j1061|fyefj99j1062|fyefj99j1063|fyefj99j1064|fyefj99j1068|fyefj99j1072|fyefj99j1076|fyefj99j1077|fyefj99j1079|fyefj99j1080|fyefj99j1081|fyefj99j1083|fyefj99j1084|fyefj99j1087|fyefj99j1088|fyefm00j0113|fyefm01j0057|fyefm01j0088|fyefm01j0091|fyefm01j0101|fyefm01j0104|fyefm01j0107|fyefm01j0112|fyefm01j0379|fyefm02j0057|fyefm02j0101|fyefm02j0104|fyefm02j0107|fyefm02j0112|fyefm02j0379|fyefm98j0066|fyefm99j0066|fyefm99j0090|fyefm99j0093|fyefm99j0110|fyefm99j0165|fyefm99j0208|fyefm99j0209|fyefm99j0295|fyefm99j0401|fyefm99j0402|fyefm99j0907|fyefm99j1054|fyefn98j0015|fyefn98j0024|fyefn98j0030|fyefn99j0015|fyefn99j0024|fyefn99j0030|fyefr94j0559|fyefr95j0559|fyefr96j0559|fyefr97j0559|fyefr98j0559|fyefr99j0012|fyefr99j0559|fyefb01305|fyeff00j0170|fyeff00j0224|fyeff00j0227|fyeff00j0228|fyeff00j0229|fyeff00j0280|fyeff00j0281|fyeff00j0282|fyeff00j0283|fyeff00j0288|fyeff00j0289|fyeff00j0331|fyeff00j0332|fyeff00j0333|fyeff00j0334|fyeff00j0335|fyeff00j0336|fyeff00j0337|fyeff00j0338|fyeff00j0346|fyeff00j0347|fyeff00j0348|fyeff00j0349|fyeff00j0350|fyeff00j0351|fyeff00j0357|fyeff00j0358|fyeff00j0371|fyeff00j0372|fyeff00j0396|fyeff00j0397|fyeff00j0424|fyeff00j0425|fyeff01j0416|fyeff02j0416|fyeff78j0418|fyeff79j0418|fyeff79j1051|fyeff80j1051|fyeff81j1051|fyeff82j1051|fyeff83j1051|fyeff84j1051|fyeff85j1051|fyeff86j1051|fyeff87j1051|fyeff88j0422|fyeff89j0422|fyeff90j0422|fyeff90j0434|fyeff90j0440|fyeff91j0422|fyeff91j0434|fyeff91j0440|fyeff92j0440|fyeff93j0440|fyeff93j1045|fyeff93j1067|fyeff94j0392|fyeff94j0440|fyeff94j0443|fyeff94j1045|fyeff94j1067|fyeff95j0219|fyeff95j0392|fyeff95j0439|fyeff95j0440|fyeff95j0443|fyeff96j0053|
fyeff96j0219|fyeff96j0392|fyeff96j0429|fyeff96j0434|fyeff96j0950|fyeff96j1019|fyeff96j1028|fyeff97j0053|fyeff97j0178|fyeff97j0191|fyeff97j0219|fyeff97j0221|fyeff97j0258|fyeff97j0324|fyeff97j0355|fyeff97j0370|fyeff97j0377|fyeff97j0392|fyeff97j0429|fyeff97j0434|fyeff97j0950|fyeff97j1019|fyeff98j0053|fyeff98j0065|fyeff98j0101|fyeff98j0144|fyeff98j0156|fyeff98j0178|fyeff98j0191|fyeff98j0193|fyeff98j0196|fyeff98j0197|fyeff98j0209|fyeff98j0210|fyeff98j0211|fyeff98j0214|fyeff98j0215|fyeff98j0218|fyeff98j0219|fyeff98j0221|fyeff98j0258|fyeff98j0260|fyeff98j0279|fyeff98j0284|fyeff98j0295|fyeff98j0296|fyeff98j0298|fyeff98j0324|fyeff98j0355|fyeff98j0370|fyeff98j0376|fyeff98j0379|fyeff98j0381|fyeff98j0392|fyeff98j0401|fyeff98j0404|fyeff98j0405|fyeff98j0407|fyeff98j0411|fyeff98j0418|fyeff98j0421|fyeff98j0423|fyeff98j0433|fyeff98j0436|fyeff98j0673|fyeff98j0896|fyeff98j0950|fyeff98j0985|fyeff98j1012|fyeff99j0053|fyeff99j0065|fyeff99j0152|fyeff99j0156|fyeff99j0159|fyeff99j0178|fyeff99j0191|fyeff99j0193|fyeff99j0196|fyeff99j0197|fyeff99j0209|fyeff99j0210|fyeff99j0211|fyeff99j0214|fyeff99j0215|fyeff99j0218|fyeff99j0219|fyeff99j0220|fyeff99j0221|fyeff99j0260|fyeff99j0279|fyeff99j0284|fyeff99j0291|fyeff99j0295|fyeff99j0296|fyeff99j0297|fyeff99j0298|fyeff99j0324|fyeff99j0339|fyeff99j0355|fyeff99j0370|fyeff99j0376|fyeff99j0379|fyeff99j0381|fyeff99j0392|fyeff99j0401|fyeff99j0404|fyeff99j0405|fyeff99j0407|fyeff99j0410|fyeff99j0411|fyeff99j0413|fyeff99j0414|fyeff99j0415|fyeff99j0418|fyeff99j0421|fyeff99j0423|fyeff99j0436|fyeff99j0673|fyeff99j0896|fyeff99j0950|fyeff99j0962|fyeff99j0985|fyeff99j1010|fyeff99j1012|fyeff99j1028|fyeff99j1090|fyeff99j1370|fayfm01j0148|fayfm01j0149|fayfm01j0155|fayfm02j0148|fayfm02j0149|fayfm02j0155|faefj00j0594|faefj00j0595|faefj00j0596|faefj00j0597|faefj01j0707|faefj02j0707|faefj03j0707|faefj90j1023|faefj91j1023|faefj92j1023|faefj94j1056|faefj95j1023|faefj95j1056|faefj96j1056|faefj98j1038|faefj99j1078|fdeff99j9001|fdeff99j9002|gyefj99j0005",
+ // A long case sensitive alternation where each entry is sandwiched by .*
+ ".*zQPbMkNO.*|.*NNSPdvMi.*|.*iWuuSoAl.*|.*qbvKMimS.*|.*IecrXtPa.*|.*seTckYqt.*|.*NxnyHkgB.*|.*fIDlOgKb.*|.*UhlWIygH.*|.*OtNoJxHG.*|.*cUTkFVIV.*|.*mTgFIHjr.*|.*jQkoIDtE.*|.*PPMKxRXl.*|.*AwMfwVkQ.*|.*CQyMrTQJ.*|.*BzrqxVSi.*|.*nTpcWuhF.*|.*PertdywG.*|.*ZZDgCtXN.*|.*WWdDPyyE.*|.*uVtNQsKk.*|.*BdeCHvPZ.*|.*wshRnFlH.*|.*aOUIitIp.*|.*RxZeCdXT.*|.*CFZMslCj.*|.*AVBZRDxl.*|.*IzIGCnhw.*|.*ythYuWiz.*|.*oztXVXhl.*|.*VbLkwqQx.*|.*qvaUgyVC.*|.*VawUjPWC.*|.*ecloYJuj.*|.*boCLTdSU.*|.*uPrKeAZx.*|.*hrMWLWBq.*|.*JOnUNHRM.*|.*rYnujkPq.*|.*dDEdZhIj.*|.*DRrfvugG.*|.*yEGfDxVV.*|.*YMYdJWuP.*|.*PHUQZNWM.*|.*AmKNrLis.*|.*zTxndVfn.*|.*FPsHoJnc.*|.*EIulZTua.*|.*KlAPhdzg.*|.*ScHJJCLt.*|.*NtTfMzME.*|.*eMCwuFdo.*|.*SEpJVJbR.*|.*cdhXZeCx.*|.*sAVtBwRh.*|.*kVFEVcMI.*|.*jzJrxraA.*|.*tGLHTell.*|.*NNWoeSaw.*|.*DcOKSetX.*|.*UXZAJyka.*|.*THpMphDP.*|.*rizheevl.*|.*kDCBRidd.*|.*pCZZRqyu.*|.*pSygkitl.*|.*SwZGkAaW.*|.*wILOrfNX.*|.*QkwVOerj.*|.*kHOMxPDr.*|.*EwOVycJv.*|.*AJvtzQFS.*|.*yEOjKYYB.*|.*LizIINLL.*|.*JBRSsfcG.*|.*YPiUqqNl.*|.*IsdEbvee.*|.*MjEpGcBm.*|.*OxXZVgEQ.*|.*xClXGuxa.*|.*UzRCGFEb.*|.*buJbvfvA.*|.*IPZQxRet.*|.*oFYShsMc.*|.*oBHffuHO.*|.*bzzKrcBR.*|.*KAjzrGCl.*|.*IPUsAVls.*|.*OGMUMbIU.*|.*gyDccHuR.*|.*bjlalnDd.*|.*ZLWjeMna.*|.*fdsuIlxQ.*|.*dVXtiomV.*|.*XxedTjNg.*|.*XWMHlNoA.*|.*nnyqArQX.*|.*opfkWGhb.*|.*wYtnhdYb.*",
// A long case insensitive alternation.
"(?i:(zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb))",
"(?i:(AAAAAAAAAAAAAAAAAAAAAAAA|BBBBBBBBBBBBBBBBBBBBBBBB|cccccccccccccccccccccccC|ſſſſſſſſſſſſſſſſſſſſſſſſS|SSSSSSSSSSSSSSSSSSSSSSSSſ))",
@@ -422,6 +431,7 @@ func TestNewFastRegexMatcher(t *testing.T) {
{"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}},
{"f.?o", nil},
+ {".*foo.*|.*bar.*|.*baz.*", &containsStringMatcher{left: trueMatcher{}, substrings: []string{"foo", "bar", "baz"}, right: trueMatcher{}}},
} {
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
diff --git a/promql/bench_test.go b/promql/bench_test.go
index cba86a7aec..9f0de52ec8 100644
--- a/promql/bench_test.go
+++ b/promql/bench_test.go
@@ -342,7 +342,7 @@ func BenchmarkRangeQuery(b *testing.B) {
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
- Parser: parser.NewParser(parser.Options{EnableExtendedRangeSelectors: true}),
+ Parser: parser.NewParser(parser.Options{EnableExtendedRangeSelectors: true, EnableExperimentalFunctions: true}),
}
engine := promqltest.NewTestEngineWithOpts(b, opts)
@@ -643,6 +643,7 @@ func BenchmarkInfoFunction(b *testing.B) {
Timeout: 100 * time.Second,
EnableAtModifier: true,
EnableNegativeOffset: true,
+ Parser: parser.NewParser(parser.Options{EnableExperimentalFunctions: true}),
}
engine := promql.NewEngine(opts)
b.Run(tc.name, func(b *testing.B) {
diff --git a/promql/engine.go b/promql/engine.go
index eb41e40605..bd7b868d86 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -1214,6 +1214,9 @@ type EvalNodeHelper struct {
// funcHistogramQuantile and funcHistogramFraction for classic histograms.
signatureToMetricWithBuckets map[string]*metricWithBuckets
nativeHistogramSamples []Sample
+ // funcHistogramQuantiles for histograms.
+ quantileStrs map[float64]string
+ signatureToLabelsWithQuantile map[string]map[float64]labels.Labels
lb *labels.Builder
lblBuf []byte
@@ -1305,6 +1308,35 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota
return annos
}
+func (enh *EvalNodeHelper) getOrCreateLblsWithQuantile(lbls labels.Labels, quantileLabel string, q float64) labels.Labels {
+ if enh.signatureToLabelsWithQuantile == nil {
+ enh.signatureToLabelsWithQuantile = make(map[string]map[float64]labels.Labels)
+ }
+
+ enh.lblBuf = lbls.Bytes(enh.lblBuf)
+ cachedLbls, ok := enh.signatureToLabelsWithQuantile[string(enh.lblBuf)]
+ if !ok {
+ cachedLbls = make(map[float64]labels.Labels, len(enh.quantileStrs))
+ enh.signatureToLabelsWithQuantile[string(enh.lblBuf)] = cachedLbls
+ }
+
+ cachedLblsWithQuantile, ok := cachedLbls[q]
+ if !ok {
+ quantileStr := "NaN"
+ if !math.IsNaN(q) {
+ // Cannot do map lookup by NaN key.
+ quantileStr = enh.quantileStrs[q]
+ }
+ cachedLblsWithQuantile = labels.NewBuilder(lbls).
+ Set(quantileLabel, quantileStr).
+ Labels()
+
+ cachedLbls[q] = cachedLblsWithQuantile
+ }
+
+ return cachedLblsWithQuantile
+}
+
// rangeEval evaluates the given expressions, and then for each step calls
// the given funcCall with the values computed for each expression at that
// step. The return value is the combination into time series of all the
@@ -4320,7 +4352,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
// further up (the latter wouldn't make sense,
// but no harm in detecting it).
n.SkipHistogramBuckets = true
- case "histogram_quantile", "histogram_fraction":
+ case "histogram_quantile", "histogram_quantiles", "histogram_fraction":
// If we ever see a function that needs the
// whole histogram, we will not skip the
// buckets.
diff --git a/promql/engine_test.go b/promql/engine_test.go
index f911419c62..5dfffd7cc7 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -94,11 +94,9 @@ func TestQueryConcurrency(t *testing.T) {
var wg sync.WaitGroup
for range maxConcurrency {
q := engine.NewTestQuery(f)
- wg.Add(1)
- go func() {
+ wg.Go(func() {
q.Exec(ctx)
- wg.Done()
- }()
+ })
select {
case <-processing:
// Expected.
@@ -108,11 +106,9 @@ func TestQueryConcurrency(t *testing.T) {
}
q := engine.NewTestQuery(f)
- wg.Add(1)
- go func() {
+ wg.Go(func() {
q.Exec(ctx)
- wg.Done()
- }()
+ })
select {
case <-processing:
diff --git a/promql/functions.go b/promql/functions.go
index 2cb90a9b6c..546f94df12 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -1720,8 +1720,8 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
inVec := vectorVals[1]
var annos annotations.Annotations
- if math.IsNaN(q) || q < 0 || q > 1 {
- annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
+ if err := validateQuantile(q, args[0]); err != nil {
+ annos.Add(err)
}
annos.Merge(enh.resetHistograms(inVec, args[1]))
@@ -1770,6 +1770,89 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
return enh.Out, annos
}
+func validateQuantile(q float64, arg parser.Expr) error {
+ if math.IsNaN(q) || q < 0 || q > 1 {
+ return annotations.NewInvalidQuantileWarning(q, arg.PositionRange())
+ }
+ return nil
+}
+
+// === histogram_quantiles(Vector parser.ValueTypeVector, label parser.ValueTypeString, q0 parser.ValueTypeScalar, qs parser.ValueTypeScalar...) (Vector, Annotations) ===
+func funcHistogramQuantiles(vectorVals []Vector, _ Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ var (
+ inVec = vectorVals[0]
+ quantileLabel = args[1].(*parser.StringLiteral).Val
+ numQuantiles = len(vectorVals[2:])
+ qs = make([]float64, 0, numQuantiles)
+
+ annos annotations.Annotations
+ )
+
+ if enh.quantileStrs == nil {
+ enh.quantileStrs = make(map[float64]string, numQuantiles)
+ }
+ for i := 2; i < len(vectorVals); i++ {
+ q := vectorVals[i][0].F
+
+ if err := validateQuantile(q, args[i]); err != nil {
+ annos.Add(err)
+ }
+
+ if _, ok := enh.quantileStrs[q]; !ok {
+ enh.quantileStrs[q] = labels.FormatOpenMetricsFloat(q)
+ }
+ qs = append(qs, q)
+ }
+
+ annos.Merge(enh.resetHistograms(inVec, args[0]))
+
+ for _, q := range qs {
+ // Deal with the native histograms.
+ for _, sample := range enh.nativeHistogramSamples {
+ if sample.H == nil {
+ // Native histogram conflicts with classic histogram at the same timestamp, ignore.
+ continue
+ }
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
+ }
+ hq, hqAnnos := HistogramQuantile(q, sample.H, sample.Metric.Get(model.MetricNameLabel), args[0].PositionRange())
+ annos.Merge(hqAnnos)
+ enh.Out = append(enh.Out, Sample{
+ Metric: enh.getOrCreateLblsWithQuantile(sample.Metric, quantileLabel, q),
+ F: hq,
+ DropName: true,
+ })
+ }
+
+ // Deal with classic histograms that have already been filtered for conflicting native histograms.
+ for _, mb := range enh.signatureToMetricWithBuckets {
+ if len(mb.buckets) > 0 {
+ hq, forcedMonotonicity, _, minBucket, maxBucket, maxDiff := BucketQuantile(q, mb.buckets)
+ if forcedMonotonicity {
+ metricName := ""
+ if enh.enableDelayedNameRemoval {
+ metricName = getMetricName(mb.metric)
+ }
+ annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(metricName, args[1].PositionRange(), enh.Ts, minBucket, maxBucket, maxDiff))
+ }
+
+ if !enh.enableDelayedNameRemoval {
+ mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
+ }
+
+ enh.Out = append(enh.Out, Sample{
+ Metric: enh.getOrCreateLblsWithQuantile(mb.metric, quantileLabel, q),
+ F: hq,
+ DropName: true,
+ })
+ }
+ }
+ }
+
+ return enh.Out, annos
+}
+
// pickFirstSampleIndex returns the index of the last sample before
// or at the range start, or 0 if none exist before the range start.
// If the vector selector is not anchored, it always returns 0, true.
@@ -2100,6 +2183,7 @@ var FunctionCalls = map[string]FunctionCall{
"histogram_count": funcHistogramCount,
"histogram_fraction": funcHistogramFraction,
"histogram_quantile": funcHistogramQuantile,
+ "histogram_quantiles": funcHistogramQuantiles,
"histogram_sum": funcHistogramSum,
"histogram_stddev": funcHistogramStdDev,
"histogram_stdvar": funcHistogramStdVar,
diff --git a/promql/info.go b/promql/info.go
index c5b88e6af3..97a79cd0f1 100644
--- a/promql/info.go
+++ b/promql/info.go
@@ -21,6 +21,7 @@ import (
"strings"
"github.com/grafana/regexp"
+ "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
@@ -46,24 +47,20 @@ func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (par
labelSelector := args[1].(*parser.VectorSelector)
for _, m := range labelSelector.LabelMatchers {
dataLabelMatchers[m.Name] = append(dataLabelMatchers[m.Name], m)
- if m.Name == labels.MetricName {
+ if m.Name == model.MetricNameLabel {
infoNameMatchers = append(infoNameMatchers, m)
}
}
} else {
- infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}
+ infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)}
}
// Don't try to enrich info series.
ignoreSeries := map[uint64]struct{}{}
-loop:
for _, s := range mat {
- name := s.Metric.Get(labels.MetricName)
- for _, m := range infoNameMatchers {
- if m.Matches(name) {
- ignoreSeries[s.Metric.Hash()] = struct{}{}
- continue loop
- }
+ name := s.Metric.Get(model.MetricNameLabel)
+ if len(infoNameMatchers) > 0 && matchersMatch(infoNameMatchers, name) {
+ ignoreSeries[s.Metric.Hash()] = struct{}{}
}
}
@@ -79,6 +76,15 @@ loop:
return res, annots
}
+func matchersMatch(matchers []*labels.Matcher, value string) bool {
+ for _, m := range matchers {
+ if !m.Matches(value) {
+ return false
+ }
+ }
+ return true
+}
+
// infoSelectHints calculates the storage.SelectHints for selecting info series, given expr (first argument to info call).
func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
var nodeTimestamp *int64
@@ -122,6 +128,19 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
// Series in ignoreSeries are not fetched.
// dataLabelMatchers may be mutated.
func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) {
+ removeNameFromDataLabelMatchers := func() {
+ for name, ms := range dataLabelMatchers {
+ ms = slices.DeleteFunc(ms, func(m *labels.Matcher) bool {
+ return m.Name == model.MetricNameLabel
+ })
+ if len(ms) > 0 {
+ dataLabelMatchers[name] = ms
+ } else {
+ delete(dataLabelMatchers, name)
+ }
+ }
+ }
+
// A map of values for all identifying labels we are interested in.
idLblValues := map[string]map[string]struct{}{}
for _, s := range mat {
@@ -147,19 +166,7 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri
// since it's not a data label selector (it's used to select which info metrics
// to consider). Without this, combineWithInfoVector would incorrectly exclude
// series when only __name__ is specified in the selector.
- for name, ms := range dataLabelMatchers {
- for i, m := range ms {
- if m.Name == labels.MetricName {
- ms = slices.Delete(ms, i, i+1)
- break
- }
- }
- if len(ms) > 0 {
- dataLabelMatchers[name] = ms
- } else {
- delete(dataLabelMatchers, name)
- }
- }
+ removeNameFromDataLabelMatchers()
return nil, nil, nil
}
@@ -183,24 +190,19 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri
for name, re := range idLblRegexps {
infoLabelMatchers = append(infoLabelMatchers, labels.MustNewMatcher(labels.MatchRegexp, name, re))
}
- var nameMatcher *labels.Matcher
- for name, ms := range dataLabelMatchers {
- for i, m := range ms {
- if m.Name == labels.MetricName {
- nameMatcher = m
- ms = slices.Delete(ms, i, i+1)
+ hasNameMatcher := false
+ for _, ms := range dataLabelMatchers {
+ for _, m := range ms {
+ if m.Name == model.MetricNameLabel {
+ hasNameMatcher = true
}
infoLabelMatchers = append(infoLabelMatchers, m)
}
- if len(ms) > 0 {
- dataLabelMatchers[name] = ms
- } else {
- delete(dataLabelMatchers, name)
- }
}
- if nameMatcher == nil {
+ removeNameFromDataLabelMatchers()
+ if !hasNameMatcher {
// Default to using the target_info metric.
- infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}, infoLabelMatchers...)
+ infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)}, infoLabelMatchers...)
}
infoIt := ev.querier.Select(ctx, false, &selectHints, infoLabelMatchers...)
@@ -220,7 +222,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
sigFunction := func(name string) func(labels.Labels) string {
return func(lset labels.Labels) string {
lb.Reset()
- lb.Add(labels.MetricName, name)
+ lb.Add(model.MetricNameLabel, name)
lset.MatchLabels(true, identifyingLabels...).Range(func(l labels.Label) {
lb.Add(l.Name, l.Value)
})
@@ -232,7 +234,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
infoMetrics := map[string]struct{}{}
for _, is := range infoMat {
lblMap := is.Metric.Map()
- infoMetrics[lblMap[labels.MetricName]] = struct{}{}
+ infoMetrics[lblMap[model.MetricNameLabel]] = struct{}{}
}
sigfs := make(map[string]func(labels.Labels) string, len(infoMetrics))
for name := range infoMetrics {
@@ -277,7 +279,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
infoSigs := make(map[uint64]string, len(infoMat))
for _, s := range infoMat {
- name := s.Metric.Map()[labels.MetricName]
+ name := s.Metric.Map()[model.MetricNameLabel]
infoSigs[s.Metric.Hash()] = sigfs[name](s.Metric)
}
@@ -415,7 +417,7 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[u
}
err := is.Metric.Validate(func(l labels.Label) error {
- if l.Name == labels.MetricName {
+ if l.Name == model.MetricNameLabel {
return nil
}
if _, exists := dataLabelMatchers[l.Name]; len(dataLabelMatchers) > 0 && !exists {
diff --git a/promql/parser/functions.go b/promql/parser/functions.go
index c7c7332305..180a255ab0 100644
--- a/promql/parser/functions.go
+++ b/promql/parser/functions.go
@@ -205,6 +205,13 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector},
ReturnType: ValueTypeVector,
},
+ "histogram_quantiles": {
+ Name: "histogram_quantiles",
+ ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeScalar, ValueTypeScalar},
+ Variadic: 9,
+ ReturnType: ValueTypeVector,
+ Experimental: true,
+ },
"double_exponential_smoothing": {
Name: "double_exponential_smoothing",
ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
diff --git a/promql/parser/printer.go b/promql/parser/printer.go
index 44ca15e532..cc5c931975 100644
--- a/promql/parser/printer.go
+++ b/promql/parser/printer.go
@@ -38,7 +38,7 @@ func tree(node Node, level string) string {
typs := strings.Split(fmt.Sprintf("%T", node), ".")[1]
var t strings.Builder
- t.WriteString(fmt.Sprintf("%s |---- %s :: %s\n", level, typs, node))
+ fmt.Fprintf(&t, "%s |---- %s :: %s\n", level, typs, node)
level += " · · ·"
diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test
index 436390ee41..db7d5de230 100644
--- a/promql/promqltest/testdata/histograms.test
+++ b/promql/promqltest/testdata/histograms.test
@@ -598,6 +598,40 @@ eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
{start="positive"} 1
{start="negative"} -0.1
+eval instant at 50m histogram_quantiles(testhistogram3, "q", 0, 0.25, 0.5, 0.75, 1)
+ expect no_warn
+ {q="0.0", start="positive"} 0
+ {q="0.0", start="negative"} -0.25
+ {q="0.25", start="positive"} 0.055
+ {q="0.25", start="negative"} -0.225
+ {q="0.5", start="positive"} 0.125
+ {q="0.5", start="negative"} -0.2
+ {q="0.75", start="positive"} 0.45
+ {q="0.75", start="negative"} -0.15
+ {q="1.0", start="positive"} 1
+ {q="1.0", start="negative"} -0.1
+
+eval instant at 50m histogram_quantiles(testhistogram3_bucket, "q", 0, 0.25, 0.5, 0.75, 1)
+ expect no_warn
+ {q="0.0", start="positive"} 0
+ {q="0.0", start="negative"} -0.25
+ {q="0.25", start="positive"} 0.055
+ {q="0.25", start="negative"} -0.225
+ {q="0.5", start="positive"} 0.125
+ {q="0.5", start="negative"} -0.2
+ {q="0.75", start="positive"} 0.45
+ {q="0.75", start="negative"} -0.15
+ {q="1.0", start="positive"} 1
+ {q="1.0", start="negative"} -0.1
+
+# Break label set uniqueness.
+
+eval instant at 50m histogram_quantiles(testhistogram3, "start", 0, 0.25, 0.5, 0.75, 1)
+ expect fail
+
+eval instant at 50m histogram_quantiles(testhistogram3_bucket, "start", 0, 0.25, 0.5, 0.75, 1)
+ expect fail
+
# Quantile too low.
eval instant at 50m histogram_quantile(-0.1, testhistogram)
@@ -610,6 +644,16 @@ eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
{start="positive"} -Inf
{start="negative"} -Inf
+eval instant at 50m histogram_quantiles(testhistogram, "q", -0.1)
+ expect warn
+ {q="-0.1", start="positive"} -Inf
+ {q="-0.1", start="negative"} -Inf
+
+eval instant at 50m histogram_quantiles(testhistogram_bucket, "q", -0.1)
+ expect warn
+ {q="-0.1", start="positive"} -Inf
+ {q="-0.1", start="negative"} -Inf
+
# Quantile too high.
eval instant at 50m histogram_quantile(1.01, testhistogram)
@@ -622,6 +666,16 @@ eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
{start="positive"} +Inf
{start="negative"} +Inf
+eval instant at 50m histogram_quantiles(testhistogram, "q", 1.01)
+ expect warn
+ {q="1.01", start="positive"} +Inf
+ {q="1.01", start="negative"} +Inf
+
+eval instant at 50m histogram_quantiles(testhistogram_bucket, "q", 1.01)
+ expect warn
+ {q="1.01", start="positive"} +Inf
+ {q="1.01", start="negative"} +Inf
+
# Quantile invalid.
eval instant at 50m histogram_quantile(NaN, testhistogram)
@@ -634,9 +688,22 @@ eval instant at 50m histogram_quantile(NaN, testhistogram_bucket)
{start="positive"} NaN
{start="negative"} NaN
+eval instant at 50m histogram_quantiles(testhistogram, "q", NaN)
+ expect warn
+ {q="NaN", start="positive"} NaN
+ {q="NaN", start="negative"} NaN
+
+eval instant at 50m histogram_quantiles(testhistogram_bucket, "q", NaN)
+ expect warn
+ {q="NaN", start="positive"} NaN
+ {q="NaN", start="negative"} NaN
+
eval instant at 50m histogram_quantile(NaN, non_existent)
expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN
+eval instant at 50m histogram_quantiles(non_existent, "q", NaN)
+ expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN
+
# Quantile value in lowest bucket.
eval instant at 50m histogram_quantile(0, testhistogram)
@@ -967,6 +1034,12 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
expect info
{} 979.75
+eval instant at 50m histogram_quantiles(nonmonotonic_bucket, "q", 0.01, 0.5, 0.99)
+ expect info
+ {q="0.01"} 0.0045
+ {q="0.5"} 8.5
+ {q="0.99"} 979.75
+
# Buckets with different representations of the same upper bound.
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.15
@@ -1002,9 +1075,15 @@ load_with_nhcb 5m
eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
expect fail
+eval instant at 50m histogram_quantiles({__name__=~"request_duration_seconds\\d*_bucket"}, "q", 0.99)
+ expect fail
+
eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
expect fail
+eval instant at 50m histogram_quantiles({__name__=~"request_duration_seconds\\d*"}, "q", 0.99)
+ expect fail
+
# Histogram with constant buckets.
load_with_nhcb 1m
const_histogram_bucket{le="0.0"} 1 1 1 1 1
@@ -1066,7 +1145,7 @@ eval instant at 10m histogram_sum(increase(histogram_with_reset[15m]))
clear
-# Test histogram_quantile and histogram_fraction with conflicting classic and native histograms.
+# Test histogram_quantile(s) and histogram_fraction with conflicting classic and native histograms.
load 1m
series{host="a"} {{schema:0 sum:5 count:4 buckets:[9 2 1]}}
series{host="a", le="0.1"} 2
@@ -1081,6 +1160,11 @@ eval instant at 0 histogram_quantile(0.8, series)
expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series"
# Should return no results.
+eval instant at 0 histogram_quantiles(series, "q", 0.1, 0.2)
+ expect no_info
+ expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series"
+ # Should return no results.
+
eval instant at 0 histogram_fraction(-Inf, 1, series)
expect no_info
expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series"
diff --git a/promql/promqltest/testdata/info.test b/promql/promqltest/testdata/info.test
index 9bc4ed0fbc..a3988abc64 100644
--- a/promql/promqltest/testdata/info.test
+++ b/promql/promqltest/testdata/info.test
@@ -79,6 +79,22 @@ eval range from 0m to 10m step 5m info(build_info, {__name__=~".+_info"})
clear
+load 5m
+ metric{instance="a", job="1", label="value"} 0 1 2
+ target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 1
+ build_info{instance="a", job="1", build_data="build"} 1 1 1
+ target_build{instance="a", job="1", build_data="build"} 1 1 1
+
+# Multiple positive __name__ matchers.
+eval range from 0m to 10m step 5m info(metric, {__name__=~"target_.+", __name__=~".+_info"})
+ metric{instance="a", job="1", label="value", data="info", another_data="another info"} 0 1 2
+
+# A positive and a negative __name__ matcher.
+eval range from 0m to 10m step 5m info(metric, {__name__=~".+_info", __name__!~".*build.*"})
+ metric{instance="a", job="1", label="value", data="info", another_data="another info"} 0 1 2
+
+clear
+
# Overlapping target_info series.
load 5m
metric{instance="a", job="1", label="value"} 0 1 2
diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test
index 3b497e5ff4..40789b295a 100644
--- a/promql/promqltest/testdata/native_histograms.test
+++ b/promql/promqltest/testdata/native_histograms.test
@@ -55,6 +55,10 @@ eval instant at 1m histogram_quantile(0.5, single_histogram)
expect no_info
{} 1.414213562373095
+eval instant at 1m histogram_quantiles(single_histogram, "q", 0.5)
+ expect no_info
+ {q="0.5"} 1.414213562373095
+
clear
# Repeat the same histogram 10 times.
@@ -1605,6 +1609,11 @@ eval instant at 1m histogram_quantile(0.81, histogram_nan)
{case="100% NaNs"} NaN
{case="20% NaNs"} NaN
+eval instant at 1m histogram_quantiles(histogram_nan, "q", 0.81)
+ expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan"
+ {case="100% NaNs", q="0.81"} NaN
+ {case="20% NaNs", q="0.81"} NaN
+
eval instant at 1m histogram_quantile(0.8, histogram_nan{case="100% NaNs"})
expect info msg: PromQL info: input to histogram_quantile has NaN observations, result is NaN for metric name "histogram_nan"
{case="100% NaNs"} NaN
@@ -1891,6 +1900,9 @@ eval instant at 1m histogram_quantile(0.5, myHistogram2)
eval instant at 1m histogram_quantile(0.5, mixedHistogram)
expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram"
+eval instant at 1m histogram_quantiles(mixedHistogram, "q", 0.5)
+ expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram"
+
clear
# A counter reset only in a bucket. Sub-queries still need to detect
@@ -1960,6 +1972,9 @@ eval instant at 1m histogram_count(histogram unless histogram_quantile(0.5, hist
eval instant at 1m histogram_quantile(0.5, histogram unless histogram_count(histogram) == 0)
{} 3.1748021039363987
+eval instant at 1m histogram_quantiles(histogram unless histogram_count(histogram) == 0, "q", 0.5)
+ {q="0.5"} 3.1748021039363987
+
clear
# Regression test for:
diff --git a/renovate.json b/renovate.json
index 814193329a..c0490c5610 100644
--- a/renovate.json
+++ b/renovate.json
@@ -14,6 +14,7 @@
"github-actions": {
"managerFilePatterns": ["scripts/**"]
},
+ "prBodyNotes": ["```release-notes","NONE","```"],
"prConcurrentLimit": 20,
"prHourlyLimit": 5,
"packageRules": [
diff --git a/rules/manager_test.go b/rules/manager_test.go
index 1b9f4be7d5..19c815e50c 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -49,6 +49,7 @@ import (
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage"
prom_testutil "github.com/prometheus/prometheus/util/testutil"
+ "github.com/prometheus/prometheus/util/testutil/synctest"
)
func TestMain(m *testing.M) {
@@ -2010,306 +2011,306 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) {
func TestAsyncRuleEvaluation(t *testing.T) {
t.Run("synchronous evaluation with independent rules", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ ctx := t.Context()
- ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, 0))
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, 0))
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
- expectedRuleCount := 6
- expectedSampleCount := 4
+ expectedRuleCount := 6
+ expectedSampleCount := 4
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
- start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
+ start := time.Now()
+ DefaultEvalIterationFunc(ctx, group, start)
- // Expected evaluation order
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Nil(t, order)
+ // Expected evaluation order
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Nil(t, order)
- // Never expect more than 1 inflight query at a time.
- require.EqualValues(t, 1, maxInflight.Load())
- // Each rule should take at least 1 second to execute sequentially.
- require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- // Group duration is higher than the sum of rule durations (group overhead).
- require.GreaterOrEqual(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
- }
+ // Never expect more than 1 inflight query at a time.
+ require.EqualValues(t, 1, maxInflight.Load())
+ // Each rule should take at least 1 second to execute sequentially.
+ require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ // Group duration is higher than the sum of rule durations (group overhead).
+ require.GreaterOrEqual(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
+ }
+ })
})
t.Run("asynchronous evaluation with independent and dependent rules", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ ctx := t.Context()
- expectedRuleCount := 6
- expectedSampleCount := 4
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ expectedRuleCount := 6
+ expectedSampleCount := 4
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
- start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
+ start := time.Now()
+ DefaultEvalIterationFunc(ctx, group, start)
- // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
- require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- }
+ // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
+ require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ }
+ })
})
t.Run("asynchronous evaluation of all independent rules, insufficient concurrency", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ ctx := t.Context()
- expectedRuleCount := 8
- expectedSampleCount := expectedRuleCount
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ expectedRuleCount := 8
+ expectedSampleCount := expectedRuleCount
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
- start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
+ start := time.Now()
+ DefaultEvalIterationFunc(ctx, group, start)
- // Expected evaluation order (isn't affected by concurrency settings)
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Equal(t, []ConcurrentRules{
- {0, 1, 2, 3, 4, 5, 6, 7},
- }, order)
+ // Expected evaluation order (isn't affected by concurrency settings)
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Equal(t, []ConcurrentRules{
+ {0, 1, 2, 3, 4, 5, 6, 7},
+ }, order)
- // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
- require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- }
+ // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
+ require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ }
+ })
})
t.Run("asynchronous evaluation of all independent rules, sufficient concurrency", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ ctx := t.Context()
- expectedRuleCount := 8
- expectedSampleCount := expectedRuleCount
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ expectedRuleCount := 8
+ expectedSampleCount := expectedRuleCount
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(expectedRuleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(expectedRuleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
- for _, group := range groups {
- require.Len(t, group.rules, expectedRuleCount)
+ for _, group := range groups {
+ require.Len(t, group.rules, expectedRuleCount)
+
+ start := time.Now()
+
+ DefaultEvalIterationFunc(ctx, group, start)
+
+ // Expected evaluation order
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Equal(t, []ConcurrentRules{
+ {0, 1, 2, 3, 4, 5, 6, 7},
+ }, order)
+
+ // Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once.
+ require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals)
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
+ // Each recording rule produces one vector.
+ require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ // Group duration is less than the sum of rule durations
+ require.Less(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
+ }
+ })
+ })
+
+ t.Run("asynchronous evaluation of independent rules, with indeterminate. Should be synchronous", func(t *testing.T) {
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
+
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
+
+ ctx := t.Context()
+
+ ruleCount := 7
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(ruleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
+
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_indeterminates.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
+
+ for _, group := range groups {
+ require.Len(t, group.rules, ruleCount)
+
+ start := time.Now()
+
+ group.Eval(ctx, start)
+
+ // Never expect more than 1 inflight query at a time.
+ require.EqualValues(t, 1, maxInflight.Load())
+ // Each rule should take at least 1 second to execute sequentially.
+ require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
+ // Each rule produces one vector.
+ require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ }
+ })
+ })
+
+ t.Run("asynchronous evaluation of rules that benefit from reordering", func(t *testing.T) {
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
+
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
+
+ ctx := t.Context()
+
+ ruleCount := 8
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(ruleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
+
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
+ var group *Group
+ for _, g := range groups {
+ group = g
+ }
start := time.Now()
- DefaultEvalIterationFunc(ctx, group, start)
-
// Expected evaluation order
order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
require.Equal(t, []ConcurrentRules{
- {0, 1, 2, 3, 4, 5, 6, 7},
+ {0, 4},
+ {1, 2, 3, 5, 6, 7},
}, order)
- // Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once.
- require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals)
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(expectedRuleCount) * artificialDelay).Seconds())
- // Each recording rule produces one vector.
- require.EqualValues(t, expectedSampleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- // Group duration is less than the sum of rule durations
- require.Less(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
- }
- })
-
- t.Run("asynchronous evaluation of independent rules, with indeterminate. Should be synchronous", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
-
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
-
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
-
- ruleCount := 7
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
-
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(ruleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
-
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_indeterminates.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
-
- for _, group := range groups {
- require.Len(t, group.rules, ruleCount)
-
- start := time.Now()
-
group.Eval(ctx, start)
- // Never expect more than 1 inflight query at a time.
- require.EqualValues(t, 1, maxInflight.Load())
- // Each rule should take at least 1 second to execute sequentially.
- require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
+ // Inflight queries should be equal to 6. This is the size of the second batch of rules that can be executed concurrently.
+ require.EqualValues(t, 6, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
// Each rule produces one vector.
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
- }
- })
-
- t.Run("asynchronous evaluation of rules that benefit from reordering", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
-
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
-
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
-
- ruleCount := 8
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
-
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(ruleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
-
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
- var group *Group
- for _, g := range groups {
- group = g
- }
-
- start := time.Now()
-
- // Expected evaluation order
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Equal(t, []ConcurrentRules{
- {0, 4},
- {1, 2, 3, 5, 6, 7},
- }, order)
-
- group.Eval(ctx, start)
-
- // Inflight queries should be equal to 6. This is the size of the second batch of rules that can be executed concurrently.
- require.EqualValues(t, 6, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
- // Each rule produces one vector.
- require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ })
})
t.Run("attempted asynchronous evaluation of chained rules", func(t *testing.T) {
- t.Parallel()
- storage := teststorage.New(t)
+ synctest.Test(t, func(t *testing.T) {
+ storage := teststorage.New(t)
- inflightQueries := atomic.Int32{}
- maxInflight := atomic.Int32{}
+ inflightQueries := atomic.Int32{}
+ maxInflight := atomic.Int32{}
- ctx, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
+ ctx := t.Context()
- ruleCount := 7
- opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
+ ruleCount := 7
+ opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
- // Configure concurrency settings.
- opts.ConcurrentEvalsEnabled = true
- opts.MaxConcurrentEvals = int64(ruleCount) * 2
- opts.RuleConcurrencyController = nil
- ruleManager := NewManager(opts)
+ // Configure concurrency settings.
+ opts.ConcurrentEvalsEnabled = true
+ opts.MaxConcurrentEvals = int64(ruleCount) * 2
+ opts.RuleConcurrencyController = nil
+ ruleManager := NewManager(opts)
- groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_chain.yaml"}...)
- require.Empty(t, errs)
- require.Len(t, groups, 1)
- var group *Group
- for _, g := range groups {
- group = g
- }
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_chain.yaml"}...)
+ require.Empty(t, errs)
+ require.Len(t, groups, 1)
+ var group *Group
+ for _, g := range groups {
+ group = g
+ }
- start := time.Now()
+ start := time.Now()
- // Expected evaluation order
- order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
- require.Equal(t, []ConcurrentRules{
- {0, 1},
- {2},
- {3},
- {4, 5, 6},
- }, order)
+ // Expected evaluation order
+ order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
+ require.Equal(t, []ConcurrentRules{
+ {0, 1},
+ {2},
+ {3},
+ {4, 5, 6},
+ }, order)
- group.Eval(ctx, start)
+ group.Eval(ctx, start)
- require.EqualValues(t, 3, maxInflight.Load())
- // Some rules should execute concurrently so should complete quicker.
- require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
- // Each rule produces one vector.
- require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ require.EqualValues(t, 3, maxInflight.Load())
+ // Some rules should execute concurrently so should complete quicker.
+ require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
+ // Each rule produces one vector.
+ require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
+ })
})
}
@@ -2472,11 +2473,9 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
// Evaluate groups concurrently (like they normally do).
var wg sync.WaitGroup
for _, group := range groups {
- wg.Add(1)
- go func() {
+ wg.Go(func() {
group.Eval(ctx, time.Now())
- wg.Done()
- }()
+ })
}
wg.Wait()
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
index 730486772e..1d321218e7 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
@@ -228,6 +228,9 @@ func (c *PrometheusConverter) addHistogramDataPoints(
}
pt := dataPoints.At(x)
+ // Clear stale exemplars from the previous data point to prevent
+ // them from leaking into _sum and _count of this data point.
+ appOpts.Exemplars = nil
timestamp := convertTimeStamp(pt.Timestamp())
startTimestamp := convertTimeStamp(pt.StartTimestamp())
baseLabels, err := c.createAttributes(pt.Attributes(), settings, reservedLabelNames, false, appOpts.Metadata)
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
index c3fecc813b..f4f5283164 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
@@ -30,6 +30,7 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric"
"github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
@@ -955,6 +956,121 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
}
}
+// TestAddHistogramDataPoints_ExemplarLeakAcrossDataPoints verifies that
+// exemplars from a previous data point don't leak into _sum/_count of the
+// next data point. Regression test for stale exemplar leak.
+func TestAddHistogramDataPoints_ExemplarLeakAcrossDataPoints(t *testing.T) {
+ ts := pcommon.Timestamp(time.Now().UnixNano())
+ exTs := pcommon.Timestamp(time.Now().Add(time.Second).UnixNano())
+
+ metric := pmetric.NewMetric()
+ metric.SetName("test_hist")
+ metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+
+ // First data point: has buckets and an exemplar with value 200 (> bound 100, so falls into +Inf).
+ pt1 := metric.Histogram().DataPoints().AppendEmpty()
+ pt1.SetTimestamp(ts)
+ pt1.SetStartTimestamp(ts)
+ pt1.SetSum(42)
+ pt1.SetCount(10)
+ pt1.ExplicitBounds().FromRaw([]float64{100})
+ pt1.BucketCounts().FromRaw([]uint64{7, 3})
+
+ ex := pt1.Exemplars().AppendEmpty()
+ ex.SetTimestamp(exTs)
+ ex.SetDoubleValue(200) // > 100, so falls into the +Inf bucket.
+
+ // Second data point: no exemplars.
+ pt2 := metric.Histogram().DataPoints().AppendEmpty()
+ pt2.SetTimestamp(ts)
+ pt2.SetStartTimestamp(ts)
+ pt2.SetSum(84)
+ pt2.SetCount(20)
+ pt2.ExplicitBounds().FromRaw([]float64{100})
+ pt2.BucketCounts().FromRaw([]uint64{14, 6})
+
+ appTest := teststorage.NewAppendable()
+ app := appTest.AppenderV2(t.Context())
+ converter := NewPrometheusConverter(app)
+ settings := Settings{}
+ resource := pcommon.NewResource()
+
+ require.NoError(t, converter.setResourceContext(resource, settings))
+ require.NoError(t, converter.setScopeContext(scope{}, settings))
+ require.NoError(t, converter.addHistogramDataPoints(
+ context.Background(),
+ metric.Histogram().DataPoints(),
+ settings,
+ storage.AOptions{
+ MetricFamilyName: metric.Name(),
+ },
+ ))
+ require.NoError(t, app.Commit())
+
+ exConverted := exemplar.Exemplar{
+ Value: 200,
+ Ts: convertTimeStamp(exTs),
+ HasTs: true,
+ }
+ tsMs := convertTimeStamp(ts)
+
+ want := []sample{
+ // -- First data point --
+ // _sum: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_sum"),
+ T: tsMs, ST: tsMs, V: 42,
+ },
+ // _count: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_count"),
+ T: tsMs, ST: tsMs, V: 10,
+ },
+ // le=100 bucket: no exemplars (exemplar value 200 > 100).
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "100"),
+ T: tsMs, ST: tsMs, V: 7,
+ },
+ // le=+Inf bucket: gets the exemplar.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "+Inf"),
+ T: tsMs, ST: tsMs, V: 10,
+ ES: []exemplar.Exemplar{exConverted},
+ },
+ // -- Second data point --
+ // _sum: NO exemplars (this is the regression check).
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_sum"),
+ T: tsMs, ST: tsMs, V: 84,
+ },
+ // _count: NO exemplars (this is the regression check).
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_count"),
+ T: tsMs, ST: tsMs, V: 20,
+ },
+ // le=100 bucket: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "100"),
+ T: tsMs, ST: tsMs, V: 14,
+ },
+ // le=+Inf bucket: no exemplars.
+ {
+ MF: "test_hist",
+ L: labels.FromStrings(model.MetricNameLabel, "test_hist_bucket", model.BucketLabel, "+Inf"),
+ T: tsMs, ST: tsMs, V: 20,
+ },
+ }
+
+ teststorage.RequireEqual(t, want, appTest.ResultSamples())
+}
+
func TestGetPromExemplars(t *testing.T) {
ctx := context.Background()
c := NewPrometheusConverter(teststorage.NewAppendable().AppenderV2(t.Context()))
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index 3dac96f6a0..9fdd750692 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -225,7 +225,8 @@ func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample,
if err != nil {
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
+ errors.Is(err, storage.ErrTooOldSample) {
h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
@@ -247,7 +248,8 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
+ errors.Is(err, storage.ErrTooOldSample) {
h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
@@ -409,7 +411,8 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
- errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
+ errors.Is(err, storage.ErrTooOldSample) {
// TODO(bwplotka): Not too spammy log?
h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
diff --git a/storage/remote/write_otlp_handler.go b/storage/remote/write_otlp_handler.go
index b8888baeb8..6cb4a0fff0 100644
--- a/storage/remote/write_otlp_handler.go
+++ b/storage/remote/write_otlp_handler.go
@@ -176,7 +176,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch {
case err == nil:
- case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+ case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
// Indicated an out of order sample is a bad request to prevent retries.
http.Error(w, err.Error(), http.StatusBadRequest)
return
diff --git a/tsdb/chunks/chunk_write_queue.go b/tsdb/chunks/chunk_write_queue.go
index 1a046ea00a..a87c2602cd 100644
--- a/tsdb/chunks/chunk_write_queue.go
+++ b/tsdb/chunks/chunk_write_queue.go
@@ -111,10 +111,7 @@ func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChu
}
func (c *chunkWriteQueue) start() {
- c.workerWg.Add(1)
- go func() {
- defer c.workerWg.Done()
-
+ c.workerWg.Go(func() {
for {
job, ok := c.jobs.pop()
if !ok {
@@ -123,7 +120,7 @@ func (c *chunkWriteQueue) start() {
c.processJob(job)
}
- }()
+ })
c.isRunningMtx.Lock()
c.isRunning = true
diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go
index 377a8181ff..2e3fff59a8 100644
--- a/tsdb/chunks/queue_test.go
+++ b/tsdb/chunks/queue_test.go
@@ -269,34 +269,26 @@ func TestQueuePushPopManyGoroutines(t *testing.T) {
readersWG := sync.WaitGroup{}
for range readGoroutines {
- readersWG.Add(1)
-
- go func() {
- defer readersWG.Done()
-
+ readersWG.Go(func() {
for j, ok := queue.pop(); ok; j, ok = queue.pop() {
refsMx.Lock()
refs[j.seriesRef] = true
refsMx.Unlock()
}
- }()
+ })
}
id := atomic.Uint64{}
writersWG := sync.WaitGroup{}
for range writeGoroutines {
- writersWG.Add(1)
-
- go func() {
- defer writersWG.Done()
-
+ writersWG.Go(func() {
for range writes {
ref := id.Inc()
require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(ref)}))
}
- }()
+ })
}
// Wait until all writes are done.
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index afe15a5f31..44a0921eec 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1717,10 +1717,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
-
+ wg.Go(func() {
// Ingest sparse histograms.
for _, ah := range allSparseSeries {
var (
@@ -1743,7 +1740,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
sparseULIDs, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil)
require.NoError(t, err)
require.Len(t, sparseULIDs, 1)
- }()
+ })
wg.Add(1)
go func(c testcase) {
diff --git a/tsdb/db.go b/tsdb/db.go
index 81c7a6c460..69d2d3af41 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -929,9 +929,13 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
for _, tmpDir := range []string{walDir, dir} {
// Remove tmp dirs.
- if err := removeBestEffortTmpDirs(l, tmpDir); err != nil {
+ if err := tsdbutil.RemoveTmpDirs(l, tmpDir, isTmpDir); err != nil {
return nil, fmt.Errorf("remove tmp dirs: %w", err)
}
+ // Remove any temporary checkpoints that might have been interrupted during creation.
+ if err := wlog.DeleteTempCheckpoints(l, tmpDir); err != nil {
+ return nil, fmt.Errorf("delete temp checkpoints: %w", err)
+ }
}
db := &DB{
@@ -1116,26 +1120,6 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
return db, nil
}
-func removeBestEffortTmpDirs(l *slog.Logger, dir string) error {
- files, err := os.ReadDir(dir)
- if os.IsNotExist(err) {
- return nil
- }
- if err != nil {
- return err
- }
- for _, f := range files {
- if isTmpDir(f) {
- if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil {
- l.Error("failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err)
- continue
- }
- l.Info("Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name()))
- }
- }
- return nil
-}
-
// StartTime implements the Storage interface.
func (db *DB) StartTime() (int64, error) {
db.mtx.RLock()
@@ -2539,8 +2523,7 @@ func isBlockDir(fi fs.DirEntry) bool {
return err == nil
}
-// isTmpDir returns true if the given file-info contains a block ULID, a checkpoint prefix,
-// or a chunk snapshot prefix and a tmp extension.
+// isTmpDir returns true if the given file-info contains a block ULID, or a chunk snapshot prefix and a tmp extension.
func isTmpDir(fi fs.DirEntry) bool {
if !fi.IsDir() {
return false
@@ -2549,9 +2532,6 @@ func isTmpDir(fi fs.DirEntry) bool {
fn := fi.Name()
ext := filepath.Ext(fn)
if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy {
- if strings.HasPrefix(fn, wlog.CheckpointPrefix) {
- return true
- }
if strings.HasPrefix(fn, chunkSnapshotPrefix) {
return true
}
diff --git a/tsdb/db_append_v2_test.go b/tsdb/db_append_v2_test.go
index acd72a986f..a3d74efefd 100644
--- a/tsdb/db_append_v2_test.go
+++ b/tsdb/db_append_v2_test.go
@@ -1846,6 +1846,7 @@ func TestBlockRanges_AppendV2(t *testing.T) {
createBlock(t, dir, genSeries(1, 1, 0, firstBlockMaxT))
db, err := open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1
@@ -1862,21 +1863,16 @@ func TestBlockRanges_AppendV2(t *testing.T) {
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 2 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
+ require.NoError(t, db.Compact(ctx))
+ blocks := db.Blocks()
+ require.Len(t, blocks, 2, "no new block after compaction")
- require.LessOrEqual(t, db.Blocks()[1].Meta().MinTime, db.Blocks()[0].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
+ require.GreaterOrEqual(t, blocks[1].Meta().MinTime, blocks[0].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[0].Meta(), blocks[1].Meta())
// Test that wal records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
app = db.AppenderV2(ctx)
- db.DisableCompactions()
_, err = app.Append(0, lbl, 0, secondBlockMaxt+1, rand.Float64(), nil, nil, storage.AOptions{})
require.NoError(t, err)
_, err = app.Append(0, lbl, 0, secondBlockMaxt+2, rand.Float64(), nil, nil, storage.AOptions{})
@@ -1893,6 +1889,7 @@ func TestBlockRanges_AppendV2(t *testing.T) {
db, err = open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
defer db.Close()
require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks")
@@ -1902,17 +1899,12 @@ func TestBlockRanges_AppendV2(t *testing.T) {
_, err = app.Append(0, lbl, 0, thirdBlockMaxt+rangeToTriggerCompaction, rand.Float64(), nil, nil, storage.AOptions{}) // Trigger a compaction
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 4 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
+ require.NoError(t, db.Compact(ctx))
+ blocks = db.Blocks()
+ require.Len(t, blocks, 4, "no new block after compaction")
- require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
-
- require.LessOrEqual(t, db.Blocks()[3].Meta().MinTime, db.Blocks()[2].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
+ require.GreaterOrEqual(t, blocks[3].Meta().MinTime, blocks[2].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[2].Meta(), blocks[3].Meta())
}
// TestDBReadOnly ensures that opening a DB in readonly mode doesn't modify any files on the disk.
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 13464c26e5..a411f2861f 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -2419,6 +2419,7 @@ func TestBlockRanges(t *testing.T) {
createBlock(t, dir, genSeries(1, 1, 0, firstBlockMaxT))
db, err := open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1
@@ -2435,21 +2436,16 @@ func TestBlockRanges(t *testing.T) {
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 2 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
- require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
+ require.NoError(t, db.Compact(ctx))
+ blocks := db.Blocks()
+ require.Len(t, blocks, 2, "no new block after compaction")
- require.LessOrEqual(t, db.Blocks()[1].Meta().MinTime, db.Blocks()[0].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
+ require.GreaterOrEqual(t, blocks[1].Meta().MinTime, blocks[0].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[0].Meta(), blocks[1].Meta())
// Test that wal records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
app = db.Appender(ctx)
- db.DisableCompactions()
_, err = app.Append(0, lbl, secondBlockMaxt+1, rand.Float64())
require.NoError(t, err)
_, err = app.Append(0, lbl, secondBlockMaxt+2, rand.Float64())
@@ -2466,6 +2462,7 @@ func TestBlockRanges(t *testing.T) {
db, err = open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
require.NoError(t, err)
+ db.DisableCompactions()
defer db.Close()
require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks")
@@ -2475,17 +2472,12 @@ func TestBlockRanges(t *testing.T) {
_, err = app.Append(0, lbl, thirdBlockMaxt+rangeToTriggerCompaction, rand.Float64()) // Trigger a compaction
require.NoError(t, err)
require.NoError(t, app.Commit())
- for range 100 {
- if len(db.Blocks()) == 4 {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
+ require.NoError(t, db.Compact(ctx))
+ blocks = db.Blocks()
+ require.Len(t, blocks, 4, "no new block after compaction")
- require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
-
- require.LessOrEqual(t, db.Blocks()[3].Meta().MinTime, db.Blocks()[2].Meta().MaxTime,
- "new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
+ require.GreaterOrEqual(t, blocks[3].Meta().MinTime, blocks[2].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", blocks[2].Meta(), blocks[3].Meta())
}
// TestDBReadOnly ensures that opening a DB in readonly mode doesn't modify any files on the disk.
diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go
index 0d45f56b3e..10a0745d87 100644
--- a/tsdb/exemplar_test.go
+++ b/tsdb/exemplar_test.go
@@ -1267,10 +1267,8 @@ func debugCircularBuffer(ce *CircularExemplarStorage) string {
if e.ref == nil {
continue
}
- sb.WriteString(fmt.Sprintf(
- "i: %d, ts: %d, next: %d, prev: %d",
- i, e.exemplar.Ts, e.next, e.prev,
- ))
+ fmt.Fprintf(&sb, "i: %d, ts: %d, next: %d, prev: %d",
+ i, e.exemplar.Ts, e.next, e.prev)
for _, idx := range ce.index {
if i == idx.newest {
sb.WriteString(" <- newest " + idx.seriesLabels.String())
@@ -1281,6 +1279,6 @@ func debugCircularBuffer(ce *CircularExemplarStorage) string {
}
sb.WriteString("\n")
}
- sb.WriteString(fmt.Sprintf("Next index: %d\n", ce.nextIndex))
+ fmt.Fprintf(&sb, "Next index: %d\n", ce.nextIndex)
return sb.String()
}
diff --git a/tsdb/head_append_v2_test.go b/tsdb/head_append_v2_test.go
index ccc75e18ed..ec4f3f5857 100644
--- a/tsdb/head_append_v2_test.go
+++ b/tsdb/head_append_v2_test.go
@@ -1334,13 +1334,11 @@ func TestDataMissingOnQueryDuringCompaction_AppenderV2(t *testing.T) {
require.NoError(t, err)
var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
// Compacting head while the querier spans the compaction time.
require.NoError(t, db.Compact(ctx))
require.NotEmpty(t, db.Blocks())
- }()
+ })
// Give enough time for compaction to finish.
// We expect it to be blocked until querier is closed.
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index f03ec05572..c1326893ef 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -3355,12 +3355,10 @@ func testHeadSeriesChunkRace(t *testing.T) {
defer q.Close()
var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
h.updateMinMaxTime(20, 25)
h.gc()
- }()
+ })
ss := q.Select(context.Background(), false, nil, matcher)
for ss.Next() {
}
@@ -3844,13 +3842,11 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
s := ss.At()
var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
// Compacting head while the querier spans the compaction time.
require.NoError(t, db.Compact(ctx))
require.NotEmpty(t, db.Blocks())
- }()
+ })
// Give enough time for compaction to finish.
// We expect it to be blocked until querier is closed.
@@ -3908,13 +3904,11 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
require.NoError(t, err)
var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
// Compacting head while the querier spans the compaction time.
require.NoError(t, db.Compact(ctx))
require.NotEmpty(t, db.Blocks())
- }()
+ })
// Give enough time for compaction to finish.
// We expect it to be blocked until querier is closed.
diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go
index 31b93f850d..c0bf213c45 100644
--- a/tsdb/index/postings.go
+++ b/tsdb/index/postings.go
@@ -956,7 +956,7 @@ func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int,
}
if p.At() == h.at() {
indexes = append(indexes, h.popIndex())
- } else if err := h.next(); err != nil {
+ } else if err := h.seekHead(p.At()); err != nil {
return nil, err
}
}
@@ -999,20 +999,18 @@ func (h *postingsWithIndexHeap) popIndex() int {
// at provides the storage.SeriesRef where root Postings is pointing at this moment.
func (h postingsWithIndexHeap) at() storage.SeriesRef { return h[0].p.At() }
-// next performs the Postings.Next() operation on the root of the heap, performing the related operation on the heap
-// and conveniently returning the result of calling Postings.Err() if the result of calling Next() was false.
-// If Next() succeeds, heap is fixed to move the root to its new position, according to its Postings.At() value.
-// If Next() returns fails and there's no error reported by Postings.Err(), then root is marked as removed and heap is fixed.
-func (h *postingsWithIndexHeap) next() error {
+// seekHead performs the Postings.Seek(val) operation on the root of the heap, fixing its position afterwards.
+// If the root is exhausted without an error, it is removed from the heap; a Seek failure returns the error.
+func (h *postingsWithIndexHeap) seekHead(val storage.SeriesRef) error {
pi := (*h)[0]
- next := pi.p.Next()
+ next := pi.p.Seek(val)
if next {
heap.Fix(h, 0)
return nil
}
if err := pi.p.Err(); err != nil {
- return fmt.Errorf("postings %d: %w", pi.index, err)
+ return fmt.Errorf("seek postings %d: %w", pi.index, err)
}
h.popIndex()
return nil
diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go
index 77b43f76ab..5c67a2da6d 100644
--- a/tsdb/index/postings_test.go
+++ b/tsdb/index/postings_test.go
@@ -1192,7 +1192,7 @@ func (p *postingsFailingAfterNthCall) Err() error {
}
func TestPostingsWithIndexHeap(t *testing.T) {
- t.Run("iterate", func(t *testing.T) {
+ t.Run("seekHead", func(t *testing.T) {
h := postingsWithIndexHeap{
{index: 0, p: NewListPostings([]storage.SeriesRef{10, 20, 30})},
{index: 1, p: NewListPostings([]storage.SeriesRef{1, 5})},
@@ -1205,7 +1205,7 @@ func TestPostingsWithIndexHeap(t *testing.T) {
for _, expected := range []storage.SeriesRef{1, 5, 10, 20, 25, 30, 50} {
require.Equal(t, expected, h.at())
- require.NoError(t, h.next())
+ require.NoError(t, h.seekHead(h.at()+1))
}
require.True(t, h.empty())
})
@@ -1223,7 +1223,7 @@ func TestPostingsWithIndexHeap(t *testing.T) {
for _, expected := range []storage.SeriesRef{1, 5, 10, 20} {
require.Equal(t, expected, h.at())
- require.NoError(t, h.next())
+ require.NoError(t, h.seekHead(h.at()+1))
}
require.Equal(t, storage.SeriesRef(25), h.at())
node := heap.Pop(&h).(postingsWithIndex)
diff --git a/tsdb/isolation_test.go b/tsdb/isolation_test.go
index f2671024e8..2b2e1a6487 100644
--- a/tsdb/isolation_test.go
+++ b/tsdb/isolation_test.go
@@ -88,10 +88,7 @@ func BenchmarkIsolation(b *testing.B) {
start := make(chan struct{})
for range goroutines {
- wg.Add(1)
-
- go func() {
- defer wg.Done()
+ wg.Go(func() {
<-start
for b.Loop() {
@@ -99,7 +96,7 @@ func BenchmarkIsolation(b *testing.B) {
iso.closeAppend(appendID)
}
- }()
+ })
}
b.ResetTimer()
@@ -118,10 +115,7 @@ func BenchmarkIsolationWithState(b *testing.B) {
start := make(chan struct{})
for range goroutines {
- wg.Add(1)
-
- go func() {
- defer wg.Done()
+ wg.Go(func() {
<-start
for b.Loop() {
@@ -129,7 +123,7 @@ func BenchmarkIsolationWithState(b *testing.B) {
iso.closeAppend(appendID)
}
- }()
+ })
}
readers := goroutines / 100
@@ -138,17 +132,14 @@ func BenchmarkIsolationWithState(b *testing.B) {
}
for g := 0; g < readers; g++ {
- wg.Add(1)
-
- go func() {
- defer wg.Done()
+ wg.Go(func() {
<-start
for b.Loop() {
s := iso.State(math.MinInt64, math.MaxInt64)
s.Close()
}
- }()
+ })
}
b.ResetTimer()
diff --git a/tsdb/label_values_bench_test.go b/tsdb/label_values_bench_test.go
new file mode 100644
index 0000000000..1e55cf80c0
--- /dev/null
+++ b/tsdb/label_values_bench_test.go
@@ -0,0 +1,86 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+ "context"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/tsdb/wlog"
+)
+
+// BenchmarkLabelValues_SlowPath benchmarks the performance of LabelValues when the matcher
+// is far ahead of the candidate posting list. This reproduces the performance regression
+// described in #14551 where dense candidates caused O(N) iteration instead of O(log N) seeking.
+func BenchmarkLabelValues_SlowPath(b *testing.B) {
+ // Create a head with some data.
+ opts := DefaultHeadOptions()
+ opts.ChunkDirRoot = b.TempDir()
+ h, err := NewHead(nil, nil, nil, nil, opts, nil)
+ require.NoError(b, err)
+ defer h.Close()
+
+ app := h.Appender(context.Background())
+ // 1. Create a large number of series for a "candidate" label (e.g. "job").
+ // We want these to NOT match the target matcher, but be candidates for a different label.
+	// The query being benchmarked is:
+	//   LabelValues("val1", matchers: "b"="1")
+	// where:
+	//   - "val1"="common" is shared by all series, giving one large
+	//     (dense) candidate posting list of 100k+1 entries.
+	//   - "b"="1" is carried only by the final series, so the
+	//     intersection must skip over all preceding candidates:
+	//     O(N) with Next()-based iteration, O(log N) with Seek().
+	//
+
+ // Create 100k series with the same label value ("common") but without the matcher label.
+ // This results in a single large posting list for that value, simulating a dense candidate.
+ for i := range 100000 {
+ _, err := app.Append(0, labels.FromStrings("val1", "common", "extra", strconv.Itoa(i)), time.Now().UnixMilli(), 1)
+ require.NoError(b, err)
+ }
+
+ // Create 1 series that matches the label "b=1", with a series ID greater than all previous ones.
+ // This forces the intersection to skip over all 100k previous candidates.
+ _, err = app.Append(0, labels.FromStrings("val1", "common", "b", "1"), time.Now().UnixMilli(), 1)
+ require.NoError(b, err)
+
+ require.NoError(b, app.Commit())
+
+ ctx := context.Background()
+ matcher := labels.MustNewMatcher(labels.MatchEqual, "b", "1")
+
+ // Use the correct method to access label values.
+ idx, err := h.Index()
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for b.Loop() {
+ // "val1"="common" has 100k+1 postings.
+ // "b=1" has 1 posting (the last one).
+ vals, err := idx.LabelValues(ctx, "val1", nil, matcher)
+ require.NoError(b, err)
+ require.Equal(b, []string{"common"}, vals)
+ }
+}
+
+// Reference the wlog package so its import is kept in use.
+var _ = wlog.WL{}
diff --git a/tsdb/tsdbutil/remove_tmp_dirs.go b/tsdb/tsdbutil/remove_tmp_dirs.go
new file mode 100644
index 0000000000..a95db3159e
--- /dev/null
+++ b/tsdb/tsdbutil/remove_tmp_dirs.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Prometheus Authors
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdbutil
+
+import (
+ "io/fs"
+ "log/slog"
+ "os"
+ "path/filepath"
+)
+
+// RemoveTmpDirs attempts to remove directories in the specified directory that match the isTmpDir predicate.
+// Errors encountered while reading the directory, other than non-existence, are returned. Any errors
+// encountered while removing tmp directories are logged but do not cause early termination.
+func RemoveTmpDirs(l *slog.Logger, dir string, isTmpDir func(fi fs.DirEntry) bool) error {
+ files, err := os.ReadDir(dir)
+ if os.IsNotExist(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ for _, f := range files {
+ if isTmpDir(f) {
+ if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil {
+ l.Error("failed to delete tmp dir", "dir", filepath.Join(dir, f.Name()), "err", err)
+ continue
+ }
+ l.Info("Found and deleted tmp dir", "dir", filepath.Join(dir, f.Name()))
+ }
+ }
+ return nil
+}
diff --git a/tsdb/tsdbutil/remove_tmp_dirs_test.go b/tsdb/tsdbutil/remove_tmp_dirs_test.go
new file mode 100644
index 0000000000..4ab282d3b3
--- /dev/null
+++ b/tsdb/tsdbutil/remove_tmp_dirs_test.go
@@ -0,0 +1,124 @@
+// Copyright 2018 The Prometheus Authors
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdbutil
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRemoveTmpDirs(t *testing.T) {
+ tests := []struct {
+ name string
+ isTmpDir func(fi fs.DirEntry) bool
+ setup func(t *testing.T, dir string)
+ expectedDirs []string // Directories that should remain after cleanup
+ }{
+ {
+ name: "remove directories with tmp prefix",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "tmpdir1"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "tmpdir2"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "normaldir"), 0o755))
+ },
+ expectedDirs: []string{"normaldir"},
+ },
+ {
+ name: "remove directories with specific suffix",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasSuffix(fi.Name(), ".tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "data.tmp"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "cache.tmp"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "permanent"), 0o755))
+ },
+ expectedDirs: []string{"permanent"},
+ },
+ {
+ name: "no temporary directories to remove",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "normaldir1"), 0o755))
+ require.NoError(t, os.Mkdir(filepath.Join(dir, "normaldir2"), 0o755))
+ },
+ expectedDirs: []string{"normaldir1", "normaldir2"},
+ },
+ {
+ name: "empty directory",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(_ *testing.T, _ string) {}, // No setup needed - directory is empty
+ expectedDirs: []string{},
+ },
+ {
+ name: "directory with files only (no directories)",
+ isTmpDir: func(fi fs.DirEntry) bool {
+ return fi.IsDir() && strings.HasPrefix(fi.Name(), "tmp")
+ },
+ setup: func(t *testing.T, dir string) {
+ require.NoError(t, os.WriteFile(filepath.Join(dir, "tmpfile1.txt"), []byte("test"), 0o644))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, "tmpfile2.txt"), []byte("test"), 0o644))
+ },
+ expectedDirs: []string{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ testDir := t.TempDir()
+
+ if tt.setup != nil {
+ tt.setup(t, testDir)
+ }
+
+ require.NoError(t, RemoveTmpDirs(promslog.NewNopLogger(), testDir, tt.isTmpDir))
+
+ entries, err := os.ReadDir(testDir)
+ require.NoError(t, err)
+
+ // Get actual remaining directories
+ var actualDirs []string
+ for _, entry := range entries {
+ if entry.IsDir() {
+ actualDirs = append(actualDirs, entry.Name())
+ }
+ }
+
+ require.ElementsMatch(t, tt.expectedDirs, actualDirs, "Remaining directories don't match expected")
+ })
+ }
+}
+
+func TestRemoveTmpDirs_NonExistentDirectory(t *testing.T) {
+ testDir := t.TempDir()
+ nonExistent := filepath.Join(testDir, "does_not_exist")
+
+ require.NoError(t, RemoveTmpDirs(promslog.NewNopLogger(), nonExistent, func(_ fs.DirEntry) bool {
+ return true
+ }))
+}
diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go
index 4c4a53e1b4..a41935044d 100644
--- a/tsdb/wlog/checkpoint.go
+++ b/tsdb/wlog/checkpoint.go
@@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"log/slog"
"math"
"os"
@@ -31,6 +32,7 @@ import (
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
)
// CheckpointStats returns stats about a created checkpoint.
@@ -80,8 +82,16 @@ func DeleteCheckpoints(dir string, maxIndex int) error {
return errors.Join(errs...)
}
-// CheckpointPrefix is the prefix used for checkpoint files.
-const CheckpointPrefix = "checkpoint."
+// checkpointTempFileSuffix is the suffix used when creating temporary checkpoint directories.
+const checkpointTempFileSuffix = ".tmp"
+
+// DeleteTempCheckpoints deletes all temporary checkpoint directories in the given directory.
+func DeleteTempCheckpoints(logger *slog.Logger, dir string) error {
+ if err := tsdbutil.RemoveTmpDirs(logger, dir, isTempDir); err != nil {
+ return fmt.Errorf("remove previous temporary checkpoint dirs: %w", err)
+ }
+ return nil
+}
// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
@@ -123,13 +133,13 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
defer sgmReader.Close()
}
- cpdir := checkpointDir(w.Dir(), to)
- cpdirtmp := cpdir + ".tmp"
-
- if err := os.RemoveAll(cpdirtmp); err != nil {
- return nil, fmt.Errorf("remove previous temporary checkpoint dir: %w", err)
+ if err := DeleteTempCheckpoints(logger, w.Dir()); err != nil {
+ return nil, err
}
+ cpdir := checkpointDir(w.Dir(), to)
+ cpdirtmp := cpdir + checkpointTempFileSuffix
+
if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
return nil, fmt.Errorf("create checkpoint dir: %w", err)
}
@@ -394,8 +404,11 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
return stats, nil
}
+// checkpointPrefix is the prefix used for checkpoint files.
+const checkpointPrefix = "checkpoint."
+
func checkpointDir(dir string, i int) string {
- return filepath.Join(dir, fmt.Sprintf(CheckpointPrefix+"%08d", i))
+ return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i))
}
type checkpointRef struct {
@@ -411,13 +424,13 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
for i := range files {
fi := files[i]
- if !strings.HasPrefix(fi.Name(), CheckpointPrefix) {
+ if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
continue
}
if !fi.IsDir() {
return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name())
}
- idx, err := strconv.Atoi(fi.Name()[len(CheckpointPrefix):])
+ idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
if err != nil {
continue
}
@@ -431,3 +444,7 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
return refs, nil
}
+
+func isTempDir(fi fs.DirEntry) bool {
+ return strings.HasPrefix(fi.Name(), checkpointPrefix) && strings.HasSuffix(fi.Name(), checkpointTempFileSuffix)
+}
diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go
index b491a27455..9056aab70b 100644
--- a/tsdb/wlog/checkpoint_test.go
+++ b/tsdb/wlog/checkpoint_test.go
@@ -423,3 +423,81 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
})
}
}
+
+func TestCheckpointDeletesTemporaryCheckpoints(t *testing.T) {
+ dir := t.TempDir()
+
+ // Create one tmp checkpoint directory
+ require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00001000.tmp"), 0o777))
+
+ w, err := New(nil, nil, dir, compression.None)
+ require.NoError(t, err)
+ defer w.Close()
+
+ _, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1000, func(_ chunks.HeadSeriesRef) bool { return true }, 1000, false)
+ require.NoError(t, err)
+
+ files, err := os.ReadDir(dir)
+ require.NoError(t, err)
+
+ var actualDirectories []string
+ for _, f := range files {
+ if !f.IsDir() {
+ continue
+ }
+ actualDirectories = append(actualDirectories, f.Name())
+ }
+ require.Equal(t, []string{"checkpoint.00001000"}, actualDirectories)
+}
+
+func TestDeleteTempCheckpoints(t *testing.T) {
+ testCases := []struct {
+ name string
+ checkpointDirectoriesToCreate []string
+ expectedDirectories []string
+ }{
+ {
+ name: "no tmp checkpoints",
+ checkpointDirectoriesToCreate: nil,
+ expectedDirectories: nil,
+ },
+ {
+ name: "one tmp checkpoint",
+ checkpointDirectoriesToCreate: []string{"checkpoint.00001000.tmp"},
+ expectedDirectories: nil,
+ },
+ {
+ name: "many tmp checkpoints",
+ checkpointDirectoriesToCreate: []string{"checkpoint.00000001.tmp", "checkpoint.00001000.tmp", "checkpoint.00002000.tmp"},
+ expectedDirectories: nil,
+ },
+ {
+ name: "mix of tmp and regular checkpoints",
+ checkpointDirectoriesToCreate: []string{"checkpoint.00000001", "checkpoint.00000001.tmp", "checkpoint.00001000.tmp", "checkpoint.00002000"},
+ expectedDirectories: []string{"checkpoint.00000001", "checkpoint.00002000"},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ dir := t.TempDir()
+ for _, fn := range tc.checkpointDirectoriesToCreate {
+ require.NoError(t, os.MkdirAll(filepath.Join(dir, fn), 0o777))
+ }
+
+ require.NoError(t, DeleteTempCheckpoints(promslog.NewNopLogger(), dir))
+
+ files, err := os.ReadDir(dir)
+ require.NoError(t, err)
+
+ var actualDirectories []string
+ for _, f := range files {
+ if !f.IsDir() {
+ continue
+ }
+ actualDirectories = append(actualDirectories, f.Name())
+ }
+ require.Equal(t, tc.expectedDirectories, actualDirectories)
+ })
+ }
+}
diff --git a/util/teststorage/appender.go b/util/teststorage/appender.go
index f1d336c243..6b1ba31f7d 100644
--- a/util/teststorage/appender.go
+++ b/util/teststorage/appender.go
@@ -76,9 +76,9 @@ func (s Sample) String() string {
if s.FH != nil {
fh = " " + s.FH.String()
}
- b.WriteString(fmt.Sprintf("%s %v%v%v st@%v t@%v", s.L.String(), s.V, h, fh, s.ST, s.T))
+ fmt.Fprintf(&b, "%s %v%v%v st@%v t@%v", s.L.String(), s.V, h, fh, s.ST, s.T)
if len(s.ES) > 0 {
- b.WriteString(fmt.Sprintf(" %v", s.ES))
+ fmt.Fprintf(&b, " %v", s.ES)
}
b.WriteString("\n")
return b.String()
diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go
index 32912c5a94..deb950b55a 100644
--- a/util/treecache/treecache.go
+++ b/util/treecache/treecache.go
@@ -265,8 +265,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
}
}
- tc.wg.Add(1)
- go func() {
+ tc.wg.Go(func() {
numWatchers.Inc()
// Pass up zookeeper events, until the node is deleted.
select {
@@ -277,8 +276,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
case <-node.done:
}
numWatchers.Dec()
- tc.wg.Done()
- }()
+ })
return nil
}
diff --git a/web/ui/mantine-ui/src/promql/functionDocs.tsx b/web/ui/mantine-ui/src/promql/functionDocs.tsx
index 4cc70a39e6..c7f744ba6f 100644
--- a/web/ui/mantine-ui/src/promql/functionDocs.tsx
+++ b/web/ui/mantine-ui/src/promql/functionDocs.tsx
@@ -1543,6 +1543,33 @@ const funcDocs: Record = {
>
),
+ histogram_quantiles: (
+ <>
+
+
+ This function has to be enabled via the{" "}
+ feature flag
+ --enable-feature=promql-experimental-functions.
+
+
+
+
+ histogram_quantiles(v instant-vector, quantile_label string, φ_1 scalar, φ_2 scalar, ...){" "}
+ calculates multiple (between 1 and 10) φ-quantiles (0 ≤ φ ≤ 1) from a{" "}
+ classic histogram or from a native
+ histogram. Quantile calculation works the same way as in histogram_quantile(). The second argument
+ (a string) specifies the label name that is used to identify different quantiles in the query result.
+
+
+
+
+ histogram_quantiles(sum(rate(foo[1m])), "quantile", 0.9, 0.99) # => {"{"}quantile="0.9"
+ {"}"} 123
+ {"{"}quantile="0.99"{"}"} 128
+
+
+ >
+ ),
histogram_stddev: (
<>
diff --git a/web/ui/mantine-ui/src/promql/functionSignatures.ts b/web/ui/mantine-ui/src/promql/functionSignatures.ts
index da21a2d4aa..837a271dce 100644
--- a/web/ui/mantine-ui/src/promql/functionSignatures.ts
+++ b/web/ui/mantine-ui/src/promql/functionSignatures.ts
@@ -69,6 +69,12 @@ export const functionSignatures: Record = {
variadic: 0,
returnType: valueType.vector,
},
+ histogram_quantiles: {
+ name: "histogram_quantiles",
+ argTypes: [valueType.vector, valueType.string, valueType.scalar, valueType.scalar],
+ variadic: 9,
+ returnType: valueType.vector,
+ },
histogram_stddev: {
name: "histogram_stddev",
argTypes: [valueType.vector],
diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts
index 3670fffff7..68d7b06553 100644
--- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts
+++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts
@@ -243,6 +243,12 @@ export const functionIdentifierTerms = [
info: 'Calculate quantiles from native histograms and from conventional histogram buckets',
type: 'function',
},
+ {
+ label: 'histogram_quantiles',
+ detail: 'function',
+ info: 'Calculate multiple quantiles from native histograms and from conventional histogram buckets',
+ type: 'function',
+ },
{
label: 'histogram_sum',
detail: 'function',
diff --git a/web/ui/module/codemirror-promql/src/types/function.ts b/web/ui/module/codemirror-promql/src/types/function.ts
index cfbf3524b5..cc1c0524fb 100644
--- a/web/ui/module/codemirror-promql/src/types/function.ts
+++ b/web/ui/module/codemirror-promql/src/types/function.ts
@@ -44,6 +44,7 @@ import {
HistogramCount,
HistogramFraction,
HistogramQuantile,
+ HistogramQuantiles,
HistogramStdDev,
HistogramStdVar,
HistogramSum,
@@ -306,6 +307,12 @@ const promqlFunctions: { [key: number]: PromQLFunction } = {
variadic: 0,
returnType: ValueType.vector,
},
+ [HistogramQuantiles]: {
+ name: 'histogram_quantiles',
+ argTypes: [ValueType.vector, ValueType.string, ValueType.scalar, ValueType.scalar],
+ variadic: 10, // NOTE(review): mantine-ui's functionSignatures.ts declares variadic: 9 for histogram_quantiles — confirm which value matches promql/parser/functions.go and align both
+ returnType: ValueType.vector,
+ },
[HistogramStdDev]: {
name: 'histogram_stddev',
argTypes: [ValueType.vector],
diff --git a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar
index 9308ad01be..e4308186bb 100644
--- a/web/ui/module/lezer-promql/src/promql.grammar
+++ b/web/ui/module/lezer-promql/src/promql.grammar
@@ -167,6 +167,7 @@ FunctionIdentifier {
HistogramCount |
HistogramFraction |
HistogramQuantile |
+ HistogramQuantiles |
HistogramStdDev |
HistogramStdVar |
HistogramSum |
@@ -426,6 +427,7 @@ NumberDurationLiteralInDurationContext {
HistogramCount { condFn<"histogram_count"> }
HistogramFraction { condFn<"histogram_fraction"> }
HistogramQuantile { condFn<"histogram_quantile"> }
+ HistogramQuantiles { condFn<"histogram_quantiles"> }
HistogramStdDev { condFn<"histogram_stddev"> }
HistogramStdVar { condFn<"histogram_stdvar"> }
HistogramSum { condFn<"histogram_sum"> }
diff --git a/web/web_test.go b/web/web_test.go
index cbcf15ffdc..5ead252cbe 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -118,12 +118,10 @@ func TestReadyAndHealthy(t *testing.T) {
}
}()
- // Give some time for the web goroutine to run since we need the server
- // to be up before starting tests.
- time.Sleep(5 * time.Second)
-
baseURL := "http://localhost" + port
+ waitForServerReady(t, baseURL, 5*time.Second)
+
resp, err := http.Get(baseURL + "/-/healthy")
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
@@ -256,12 +254,10 @@ func TestRoutePrefix(t *testing.T) {
}
}()
- // Give some time for the web goroutine to run since we need the server
- // to be up before starting tests.
- time.Sleep(5 * time.Second)
-
baseURL := "http://localhost" + port
+ waitForServerReady(t, baseURL+opts.RoutePrefix, 5*time.Second)
+
resp, err := http.Get(baseURL + opts.RoutePrefix + "/-/healthy")
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
@@ -449,9 +445,9 @@ func TestShutdownWithStaleConnection(t *testing.T) {
close(closed)
}()
- // Give some time for the web goroutine to run since we need the server
- // to be up before starting tests.
- time.Sleep(5 * time.Second)
+ baseURL := "http://localhost" + port
+
+ waitForServerReady(t, baseURL, 5*time.Second)
// Open a socket, and don't use it. This connection should then be closed
// after the ReadTimeout.
@@ -500,23 +496,19 @@ func TestHandleMultipleQuitRequests(t *testing.T) {
close(closed)
}()
- // Give some time for the web goroutine to run since we need the server
- // to be up before starting tests.
- time.Sleep(5 * time.Second)
-
baseURL := opts.ExternalURL.Scheme + "://" + opts.ExternalURL.Host
+ waitForServerReady(t, baseURL, 5*time.Second)
+
start := make(chan struct{})
var wg sync.WaitGroup
for range 3 {
- wg.Add(1)
- go func() {
- defer wg.Done()
+ wg.Go(func() {
<-start
resp, err := http.Post(baseURL+"/-/quit", "", strings.NewReader(""))
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
- }()
+ })
}
close(start)
wg.Wait()
@@ -578,11 +570,10 @@ func TestAgentAPIEndPoints(t *testing.T) {
}
}()
- // Give some time for the web goroutine to run since we need the server
- // to be up before starting tests.
- time.Sleep(5 * time.Second)
baseURL := "http://localhost" + port + "/api/v1"
+ waitForServerReady(t, "http://localhost"+port, 5*time.Second)
+
// Test for non-available endpoints in the Agent mode.
for path, methods := range map[string][]string{
"/labels": {http.MethodGet, http.MethodPost},
@@ -711,9 +702,7 @@ func TestMultipleListenAddresses(t *testing.T) {
}
}()
- // Give some time for the web goroutine to run since we need the server
- // to be up before starting tests.
- time.Sleep(5 * time.Second)
+ waitForServerReady(t, "http://localhost"+port1, 5*time.Second)
// Set to ready.
webHandler.SetReady(Ready)
@@ -732,3 +721,24 @@ func TestMultipleListenAddresses(t *testing.T) {
cleanupTestResponse(t, resp)
}
}
+
+// Give some time for the web goroutine to run since we need the server
+// to be up before starting tests.
+func waitForServerReady(t *testing.T, baseURL string, timeout time.Duration) {
+ t.Helper()
+
+ interval := 100 * time.Millisecond
+ deadline := time.Now().Add(timeout)
+
+ for time.Now().Before(deadline) {
+ resp, err := http.Get(baseURL + "/-/healthy")
+ if resp != nil {
+ cleanupTestResponse(t, resp)
+ }
+ if err == nil && resp.StatusCode == http.StatusOK {
+ return
+ }
+ time.Sleep(interval)
+ }
+ t.Fatalf("Server did not become ready within %v", timeout)
+}