diff --git a/.github/stale.yml b/.github/stale.yml
deleted file mode 100644
index 66a72af533..0000000000
--- a/.github/stale.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Configuration for probot-stale - https://github.com/probot/stale
-
-# Number of days of inactivity before an Issue or Pull Request becomes stale
-daysUntilStale: 60
-
-# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
-# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
-daysUntilClose: false
-
-# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
-onlyLabels: []
-
-# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
-exemptLabels:
-  - keepalive
-
-# Set to true to ignore issues in a project (defaults to false)
-exemptProjects: false
-
-# Set to true to ignore issues in a milestone (defaults to false)
-exemptMilestones: false
-
-# Set to true to ignore issues with an assignee (defaults to false)
-exemptAssignees: false
-
-# Label to use when marking as stale
-staleLabel: stale
-
-# Comment to post when marking as stale. Set to `false` to disable
-markComment: false
-
-# Comment to post when removing the stale label.
-# unmarkComment: >
-#   Your comment here.
-
-# Comment to post when closing a stale Issue or Pull Request.
-# closeComment: >
-#   Your comment here.
-
-# Limit the number of actions per hour, from 1-30. Default is 30
-limitPerRun: 30
-
-# Limit to only `issues` or `pulls`
-only: pulls
-
-# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
-# pulls:
-#   daysUntilStale: 30
-#   markComment: >
-#     This pull request has been automatically marked as stale because it has not had
-#     recent activity. It will be closed if no further activity occurs. Thank you
-#     for your contributions.
-
-# issues:
-#   exemptLabels:
-#     - confirmed
diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index acf91ea12a..3f6cf76e16 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index f52d20785f..632d38cb00 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -13,7 +13,7 @@ jobs:
     if: github.repository_owner == 'prometheus'
     steps:
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
       with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8b3624383c..9e614cb2db 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -11,12 +11,14 @@ jobs:
     container:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
-      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
+        with:
+          enable_npm: true
+      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
       - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples
@@ -25,10 +27,10 @@ jobs:
     name: More Go tests
     runs-on: ubuntu-latest
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
       - run: GOARCH=386 go test ./cmd/prometheus
@@ -39,9 +41,12 @@ jobs:
   test_go_oldest:
     name: Go tests with previous Go version
     runs-on: ubuntu-latest
+    env:
+      # Enforce the Go version.
+      GOTOOLCHAIN: local
     container:
       # The go version in this image should be N-1 wrt test_go.
-      image: quay.io/prometheus/golang-builder:1.21-base
+      image: quay.io/prometheus/golang-builder:1.22-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: make build
@@ -54,11 +59,11 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false
@@ -75,9 +80,9 @@ jobs:
     runs-on: windows-latest
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+      - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
        with:
-          go-version: 1.22.x
+          go-version: 1.23.x
      - run: |
          $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
          go test $TestTargets -vet=off -v
@@ -89,7 +94,7 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: go install ./cmd/promtool/.
@@ -107,6 +112,8 @@ jobs:
     if: |
       !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       &&
+      !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      &&
       !(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       &&
       !(github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -115,7 +122,7 @@ jobs:
         thread: [ 0, 1, 2 ]
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -127,6 +134,8 @@ jobs:
     if: |
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      ||
       (github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       ||
       (github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -138,11 +147,23 @@ jobs:
     # should also be updated.
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
           thread: ${{ matrix.thread }}
+  build_all_status:
+    name: Report status of build Prometheus for all architectures
+    runs-on: ubuntu-latest
+    needs: [build_all]
+    if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')
+    steps:
+      - name: Successful build
+        if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
+        run: exit 0
+      - name: Failing or cancelled build
+        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+        run: exit 1
   check_generated_parser:
     name: Check generated parser
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Install Go
-        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
           cache: false
-          go-version: 1.22.x
+          go-version: 1.23.x
       - name: Run goyacc and check for diff
         run: make install-goyacc check-generated-parser
   golangci:
     steps:
       - name: Checkout repository
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Install Go
-        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
-          go-version: 1.22.x
+          go-version: 1.23.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
+        uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.59.1
+          version: v1.60.2
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -188,7 +209,7 @@ jobs:
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -199,10 +220,13 @@ jobs:
     name: Publish release artefacts
     runs-on: ubuntu-latest
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
-    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+    if: |
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -217,9 +241,9 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - name: Install nodejs
-        uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
+        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
@@ -230,17 +254,26 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-node-
       - name: Check libraries version
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
-        run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)"
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+        run: ./scripts/ui_release.sh --check-package "$(./scripts/get_module_version.sh ${{ github.ref_name }})"
       - name: build
         run: make assets
       - name: Copy files before publishing libs
         run: ./scripts/ui_release.sh --copy
       - name: Publish dry-run libraries
-        if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))"
+        if: |
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          &&
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish dry-run
       - name: Publish libraries
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish
         env:
           # The setup-node action writes an .npmrc file with this env variable
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 1ea1f5efae..89aa2ba29b 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -27,12 +27,12 @@ jobs:
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
      - name: Initialize CodeQL
-        uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+        uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
         with:
           languages: ${{ matrix.language }}

      - name: Autobuild
-        uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+        uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6

      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+        uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
index dc510e5966..80356e45bf 100644
--- a/.github/workflows/fuzzing.yml
+++ b/.github/workflows/fuzzing.yml
@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
        if: failure() && steps.build.outcome == 'success'
        with:
          name: artifacts
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 078084888a..7e0ed7dc36 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -26,7 +26,7 @@ jobs:
          persist-credentials: false
      - name: "Run analysis"
-        uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
+        uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0
        with:
          results_file: results.sarif
          results_format: sarif
@@ -37,7 +37,7 @@ jobs:
      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
-        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0
        with:
          name: SARIF file
          path: results.sarif
@@ -45,6 +45,6 @@ jobs:

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
+        uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6
        with:
          sarif_file: results.sarif
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000..d71bcbc9d8
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,31 @@
+name: Stale Check
+on:
+  workflow_dispatch: {}
+  schedule:
+    - cron: '16 22 * * *'
+permissions:
+  issues: write
+  pull-requests: write
+jobs:
+  stale:
+    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          # opt out of defaults to avoid marking issues as stale and closing them
+          # https://github.com/actions/stale#days-before-close
+          # https://github.com/actions/stale#days-before-stale
+          days-before-stale: -1
+          days-before-close: -1
+          # Setting these to an empty string skips the comments.
+          # https://github.com/actions/stale#stale-pr-message
+          # https://github.com/actions/stale#stale-issue-message
+          stale-pr-message: ''
+          stale-issue-message: ''
+          operations-per-run: 30
+          # Override days-before-stale so that only pull requests are marked as stale.
+          days-before-pr-stale: 60
+          stale-pr-label: stale
+          exempt-pr-labels: keepalive
diff --git a/.gitignore b/.gitignore
index e85d766b09..0d99305f69 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,7 +22,7 @@ benchmark.txt
 /documentation/examples/remote_storage/example_write_adapter/example_write_adapter

 npm_licenses.tar.bz2
-/web/ui/static/react
+/web/ui/static

 /vendor
 /.build
diff --git a/.golangci.yml b/.golangci.yml
index 026d68a313..303cd33d8b 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,12 +1,5 @@
 run:
   timeout: 15m
-  skip-files:
-    # Skip autogenerated files.
-    - ^.*\.(pb|y)\.go$
-  skip-dirs:
-    # Copied it from a different source
-    - storage/remote/otlptranslator/prometheusremotewrite
-    - storage/remote/otlptranslator/prometheus

 output:
   sort-results: true
@@ -32,8 +25,34 @@ linters:
     - loggercheck

 issues:
+  max-issues-per-linter: 0
   max-same-issues: 0
+  # The default exclusions are too aggressive. For one, they
+  # essentially disable any linting on doc comments. We disable
+  # default exclusions here and add exclusions fitting our codebase
+  # further down.
+  exclude-use-default: false
+  exclude-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$
+  exclude-dirs:
+    # Copied it from a different source.
+    - storage/remote/otlptranslator/prometheusremotewrite
+    - storage/remote/otlptranslator/prometheus
   exclude-rules:
+    - linters:
+        - errcheck
+      # Taken from the default exclusions (that are otherwise disabled above).
+      text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+    - linters:
+        - govet
+      # We use many Seek methods that do not follow the usual pattern.
+      text: "stdmethods: method Seek.* should have signature Seek"
+    - linters:
+        - revive
+      # We have stopped at some point to write doc comments on exported symbols.
+      # TODO(beorn7): Maybe we should enforce this again? There are ~500 offenders right now.
+      text: exported (.+) should have comment( \(or a comment on this block\))? or be unexported
     - linters:
        - gocritic
      text: "appendAssign"
@@ -94,15 +113,14 @@ linters-settings:
      errorf: false
  revive:
    # By default, revive will enable only the linting rules that are named in the configuration file.
-    # So, it's needed to explicitly set in configuration all required rules.
-    # The following configuration enables all the rules from the defaults.toml
-    # https://github.com/mgechev/revive/blob/master/defaults.toml
+    # So, it's needed to explicitly enable all required rules here.
    rules:
      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
      - name: blank-imports
+      - name: comment-spacings
      - name: context-as-argument
        arguments:
-          # allow functions with test or bench signatures
+          # Allow functions with test or bench signatures.
          - allowTypesBefore: "*testing.T,testing.TB"
      - name: context-keys-type
      - name: dot-imports
@@ -118,6 +136,8 @@ linters-settings:
      - name: increment-decrement
      - name: indent-error-flow
      - name: package-comments
+        # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them.
+        disabled: true
      - name: range
      - name: receiver-naming
      - name: redefines-builtin-id
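As background for the revive settings above: `comment-spacings` and the excluded `exported` rule both police Go doc comments. A minimal illustrative sketch — not code from this repository — of comments that satisfy them:

```go
// Package example carries the kind of package doc comment that revive's
// package-comments rule would require; the rule is disabled above because
// many Prometheus packages still lack one.
package example

// ExportedThing begins with the symbol's own name, as the exported rule
// (excluded above, with ~500 offenders) demands, and "// " with a space
// after the slashes satisfies comment-spacings.
type ExportedThing struct{}
```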
diff --git a/.promu.yml b/.promu.yml
index 0aa51d6d31..6feaa6ef64 100644
--- a/.promu.yml
+++ b/.promu.yml
@@ -1,7 +1,7 @@
 go:
     # Whenever the Go version is updated here,
     # .github/workflows should also be updated.
-    version: 1.22
+    version: 1.23
 repository:
     path: github.com/prometheus/prometheus
 build:
@@ -28,8 +28,6 @@ tarball:
     # Whenever there are new files to include in the tarball,
     # remember to make sure the new files will be generated after `make build`.
     files:
-        - consoles
-        - console_libraries
        - documentation/examples/prometheus.yml
        - LICENSE
        - NOTICE
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7e47999934..7fbdadfa62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,119 @@

 ## unreleased

+* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
+* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
+
+## 3.0.0-beta.0 / 2024-09-05
+
+Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.
+
+As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs.
+
+* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
+* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
+* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
+* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
+* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705
+* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
+* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
+* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
+* [CHANGE] Remove deprecated `remote-write-receiver`, `promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
+* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
+* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
+* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
+
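The left-open change above (#13904) is easiest to see as a predicate on sample timestamps. A short illustrative Go sketch of the new interval semantics — not the actual TSDB code:

```go
package main

import "fmt"

// inRange reports whether sample timestamp ts falls into the range
// (mint, maxt]. Up to Prometheus 2.x both ends were closed (mint <= ts);
// from 3.0 on, a sample coinciding with the lower limit is excluded.
// Illustrative only, not the real implementation.
func inRange(ts, mint, maxt int64) bool {
	return ts > mint && ts <= maxt // left-open, right-closed
}

func main() {
	mint, maxt := int64(1000), int64(1300) // e.g. a 5m window ending at maxt
	fmt.Println(inRange(1000, mint, maxt)) // false: exactly on the lower limit
	fmt.Println(inRange(1300, mint, maxt)) // true: upper limit still included
}
```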
+## 2.55.0-rc.0 / 2024-09-20
+
+* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
+* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
+* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
+* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
+* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734
+* [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200
+* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
+* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403
+* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
+* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
+* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
+* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
+* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450
+* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
+* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655
+* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621
+* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
+* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
+* [ENHANCEMENT] API: Support multiple listening addresses. #14665
+* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948
+* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
+* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
+* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
+* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810
+* [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766
+* [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716
+* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821
+* [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042
+
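Two entries above mention Kahan summation (`avg`, `avg_over_time`; 2.54.0 below applies it to `sum()`). For background, here is the textbook compensated-summation algorithm in Go — the PromQL implementation differs in detail:

```go
package main

import "fmt"

// kahanSum adds values while carrying a compensation term c, so that
// low-order bits lost to float64 rounding in each addition are fed back
// into the next one instead of being discarded.
func kahanSum(values []float64) float64 {
	var sum, c float64
	for _, v := range values {
		y := v - c        // re-apply the error captured last round
		t := sum + y      // big + small: low-order bits of y may be lost
		c = (t - sum) - y // (t - sum) is what was actually added; c is the loss
		sum = t
	}
	return sum
}

func main() {
	vals := []float64{1e16, 1, 1}
	fmt.Println(kahanSum(vals)) // 1.0000000000000002e+16 (exact)
	naive := 0.0
	for _, v := range vals {
		naive += v
	}
	fmt.Println(naive) // 1e+16: both 1s were lost to rounding
}
```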
+## 2.54.1 / 2024-08-27
+
+* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685
+* [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. #14654
+* [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538
+* [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples. #14514
+* [BUGFIX] PromQL: fix native histograms getting corrupted due to vector selector bug in range queries. #14605
+
+## 2.54.0 / 2024-08-09
+
+Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+This is experimental at this time and may still change.
+Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`.
+
+* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437
+* [CHANGE] API: Split warnings from info annotations in API response. #14327
+* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395,#14427,#14444
+* [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators. #12503
+* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821
+* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138
+* [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362
+* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097
+* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438
+* [ENHANCEMENT] TSDB: Optimise seek within index. #14393
+* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
+* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286
+* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584
+* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368
+* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
+* [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156
+* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490
+* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312
+* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430
+* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094
+* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290
+* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194
+* [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209
+* [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts (introduced by #14396). #14584
+* [BUGFIX] CLI: escape `|` characters when generating docs. #14420
+* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454
+* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279
+* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341
+* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302
+* [BUGFIX] Rules: Fix rare panic on reload. #14366
+* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004
+* [BUGFIX] Remote-Write: reject samples with future timestamps. #14304
+* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078
+* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174
+* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299
+* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515
+
+## 2.53.1 / 2024-07-10
+
+Fix a bug which would drop samples in remote-write if the sending flow stalled
+for longer than it takes to write one "WAL segment". How long this takes depends on the size
+of your Prometheus; as a rough guide with 10 million series it is about 2-3 minutes.
+
+* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446
+
 ## 2.53.0 / 2024-06-16

 This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75.
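For readers unfamiliar with GOGC: the same trade-off can be applied to any Go program, via the `GOGC` environment variable or programmatically. A hedged sketch using the standard runtime API — how Prometheus itself wires up its default is not shown in this diff:

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

func main() {
	// GOGC=75 triggers a collection once the heap has grown 75% over the
	// live set, trading a little extra CPU for less memory than the Go
	// default of 100. Equivalent to running with GOGC=75 in the env.
	if os.Getenv("GOGC") == "" {
		old := debug.SetGCPercent(75)
		fmt.Printf("GOGC changed from %d to 75\n", old)
	}
}
```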
@@ -40,7 +153,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
 * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
-* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
@@ -577,7 +690,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2

 ## 2.33.0 / 2022-01-29

-* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121
+* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119
 * [FEATURE] Config: Add `stripPort` template function. #10002
 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
@@ -814,7 +927,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.
 * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682
 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659
 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790
-* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723
+* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723
 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737
 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766
@@ -1740,7 +1853,7 @@ information, read the announcement blog post and migration guide.

 ## 1.7.0 / 2017-06-06

 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy.
-* [CHANGE] Properly ellide secrets in config.
+* [CHANGE] Properly elide secrets in config.
 * [FEATURE] Add OpenStack service discovery.
 * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces.
 * [FEATURE] Add metric for discovered number of Alertmanagers.
diff --git a/Dockerfile b/Dockerfile
index b47f77dcd6..b96b3b765d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,21 +8,16 @@ ARG OS="linux"
 COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus
 COPY .build/${OS}-${ARCH}/promtool /bin/promtool
 COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml
-COPY console_libraries/ /usr/share/prometheus/console_libraries/
-COPY consoles/ /usr/share/prometheus/consoles/
 COPY LICENSE /LICENSE
 COPY NOTICE /NOTICE
 COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2

 WORKDIR /prometheus
-RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
-    chown -R nobody:nobody /etc/prometheus /prometheus
+RUN chown -R nobody:nobody /etc/prometheus /prometheus

 USER       nobody
 EXPOSE     9090
 VOLUME     [ "/prometheus" ]
 ENTRYPOINT [ "/bin/prometheus" ]
 CMD        [ "--config.file=/etc/prometheus/prometheus.yml", \
-             "--storage.tsdb.path=/prometheus", \
-             "--web.console.libraries=/usr/share/prometheus/console_libraries", \
-             "--web.console.templates=/usr/share/prometheus/consoles" ]
+             "--storage.tsdb.path=/prometheus" ]
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 3661ddaa0a..7f4153abc1 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -13,13 +13,12 @@ Maintainers for specific parts of the codebase:
 * `k8s`: Frederic Branczyk ( / @brancz)
 * `documentation`
   * `prometheus-mixin`: Matthias Loibl ( / @metalmatze)
-* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7),
+* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), George Krajcsovits ( / @krajorama)
 * `storage`
   * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank)
     * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez)
   * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez)
-    * `agent`: Robert Fratto ( / @rfratto)
 * `web`
   * `ui`: Julius Volz ( / @juliusv)
   * `module`: Augustin Husson ( @nexucis)
diff --git a/Makefile b/Makefile
index f2bb3fcb7a..0b5935de00 100644
--- a/Makefile
+++ b/Makefile
@@ -30,6 +30,11 @@ include Makefile.common

 DOCKER_IMAGE_NAME ?= prometheus

+# Only build UI if PREBUILT_ASSETS_STATIC_DIR is not set
+ifdef PREBUILT_ASSETS_STATIC_DIR
+  SKIP_UI_BUILD = true
+endif
+
 .PHONY: update-npm-deps
 update-npm-deps:
 	@echo ">> updating npm dependencies"
@@ -42,13 +47,17 @@ upgrade-npm-deps:

 .PHONY: ui-bump-version
 ui-bump-version:
-	version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}"
+	version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}"
 	cd web/ui && npm install
 	git add "./web/ui/package-lock.json" "./**/package.json"

 .PHONY: ui-install
 ui-install:
 	cd $(UI_PATH) && npm install
+	# The old React app has been separated from the npm workspaces setup to avoid
+	# issues with conflicting dependencies. This is a temporary solution until the
+	# new Mantine-based UI is fully integrated and the old app can be removed.
+	cd $(UI_PATH)/react-app && npm install

 .PHONY: ui-build
 ui-build:
@@ -65,10 +74,30 @@ ui-test:

 .PHONY: ui-lint
 ui-lint:
 	cd $(UI_PATH) && npm run lint
+	# The old React app has been separated from the npm workspaces setup to avoid
+	# issues with conflicting dependencies. This is a temporary solution until the
+	# new Mantine-based UI is fully integrated and the old app can be removed.
+	cd $(UI_PATH)/react-app && npm run lint

 .PHONY: assets
+ifndef SKIP_UI_BUILD
 assets: ui-install ui-build

+.PHONY: npm_licenses
+npm_licenses: ui-install
+	@echo ">> bundling npm licenses"
+	rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
+	ln -s . npm_licenses
+	find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
+	rm -f npm_licenses
+else
+assets:
+	@echo '>> skipping assets build, pre-built assets provided'
+
+npm_licenses:
+	@echo '>> skipping assets npm licenses, pre-built assets provided'
+endif
+
 .PHONY: assets-compress
 assets-compress: assets
 	@echo '>> compressing assets'
@@ -117,14 +146,6 @@ else
 test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
 endif

-.PHONY: npm_licenses
-npm_licenses: ui-install
-	@echo ">> bundling npm licenses"
-	rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
-	ln -s . npm_licenses
-	find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
-	rm -f npm_licenses
-
 .PHONY: tarball
 tarball: npm_licenses common-tarball
diff --git a/Makefile.common b/Makefile.common
index e3da72ab47..cbb5d86382 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.1
+GOLANGCI_LINT_VERSION ?= v1.60.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -275,3 +275,9 @@ $(1)_precheck:
 		exit 1; \
 	fi
 endef
+
+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/README.md b/README.md
index cd14ed2ecb..7528147b0e 100644
--- a/README.md
+++ b/README.md
@@ -12,9 +12,10 @@ examples and guides.

 [![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
+[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/prometheus/badge)](https://clomonitor.io/projects/cncf/prometheus)
 [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
 [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
@@ -114,7 +115,7 @@ The Makefile provides several targets:

 Prometheus is bundled with many service discovery plugins.
 When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formated list of go
+file to disable some service discoveries. The file is a yaml-formatted list of go
 import path that will be built into the Prometheus binary.

 After you have changed the file, you
diff --git a/RELEASE.md b/RELEASE.md
index f9a42be6b8..8e78a6a3ec 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -57,7 +57,9 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
 | v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
 | v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
-| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
+| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
+| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
+| v2.55 | 2024-09-17 | Bryan Boreham (GitHub: @bboreham) |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -186,7 +188,7 @@ the Prometheus server, we use major version zero releases for the libraries.
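The old `sed s/2/0/` trick below worked because a v2.x.y server tag mapped directly to a 0.x.y UI module version; v3 tags need the dedicated script. A hypothetical Go sketch of such a mapping — the authoritative rules live in `scripts/get_module_version.sh`, and the v3 behavior shown here (v3.0.0-beta.0 → 0.300.0-beta.0) is an assumption:

```go
package main

import (
	"fmt"
	"strings"
)

// moduleVersion maps a Prometheus server version to the 0.x version used
// for the UI npm packages. Hypothetical reconstruction: v2.55.0 -> 0.55.0,
// and for 3.x the major and a zero-padded minor collapse into one number,
// e.g. v3.0.0-beta.0 -> 0.300.0-beta.0.
func moduleVersion(v string) string {
	parts := strings.SplitN(strings.TrimPrefix(v, "v"), ".", 3)
	if parts[0] == "2" {
		return "0." + parts[1] + "." + parts[2]
	}
	minor := parts[1]
	if len(minor) < 2 {
		minor = "0" + minor // zero-pad so 3.0 -> 300, 3.12 -> 312
	}
	return "0." + parts[0] + minor + "." + parts[2]
}

func main() {
	fmt.Println(moduleVersion("v2.55.0"))       // 0.55.0
	fmt.Println(moduleVersion("v3.0.0-beta.0")) // 0.300.0-beta.0
}
```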
 Tag the new library release via the following commands:

 ```bash
-tag="v$(sed s/2/0/ < VERSION)"
+tag="v$(./scripts/get_module_version.sh)"
 git tag -s "${tag}" -m "${tag}"
 git push origin "${tag}"
 ```
diff --git a/SECURITY-INSIGHTS.yml b/SECURITY-INSIGHTS.yml
new file mode 100644
index 0000000000..009b356214
--- /dev/null
+++ b/SECURITY-INSIGHTS.yml
@@ -0,0 +1,48 @@
+header:
+  schema-version: '1.0.0'
+  expiration-date: '2025-07-30T01:00:00.000Z'
+  last-updated: '2024-07-30'
+  last-reviewed: '2024-07-30'
+  project-url: https://github.com/prometheus/prometheus
+  changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
+  license: https://github.com/prometheus/prometheus/blob/main/LICENSE
+project-lifecycle:
+  status: active
+  bug-fixes-only: false
+  core-maintainers:
+    - https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md
+contribution-policy:
+  accepts-pull-requests: true
+  accepts-automated-pull-requests: true
+dependencies:
+  third-party-packages: true
+  dependencies-lists:
+    - https://github.com/prometheus/prometheus/blob/main/go.mod
+    - https://github.com/prometheus/prometheus/blob/main/web/ui/package.json
+  env-dependencies-policy:
+    policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management
+distribution-points:
+  - https://github.com/prometheus/prometheus/releases
+documentation:
+  - https://prometheus.io/docs/introduction/overview/
+security-contacts:
+  - type: email
+    value: prometheus-team@googlegroups.com
+security-testing:
+  - tool-type: sca
+    tool-name: Dependabot
+    tool-version: latest
+    integration:
+      ad-hoc: false
+      ci: true
+      before-release: true
+  - tool-type: sast
+    tool-name: CodeQL
+    tool-version: latest
+    integration:
+      ad-hoc: false
+      ci: true
+      before-release: true
+vulnerability-reporting:
+  accepts-vulnerability-reports: true
+  security-policy: https://github.com/prometheus/prometheus/security/policy
diff --git a/VERSION b/VERSION
index 261d95596f..7e9b524994 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.53.0
+3.0.0-beta.0
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 7544f276a6..e7fd82e6f3 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -58,8 +58,6 @@ import (

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
-	"github.com/prometheus/prometheus/discovery/legacymanager"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
@@ -103,6 +101,8 @@ var (
 )

 func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
 	prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_")))

 	var err error
@@ -152,13 +152,16 @@ type flagConfig struct {
 	queryConcurrency    int
 	queryMaxSamples     int
 	RemoteFlushDeadline model.Duration
+	nameEscapingScheme  string
+
+	enableAutoReload   bool
+	autoReloadInterval model.Duration

 	featureList   []string
 	memlimitRatio float64
 	// These options are extracted from featureList
 	// for ease of use.
 	enableExpandExternalLabels bool
-	enableNewSDManager         bool
 	enablePerStepStats         bool
 	enableAutoGOMAXPROCS       bool
 	enableAutoGOMEMLIMIT       bool
@@ -168,6 +171,8 @@ type flagConfig struct {
 	corsRegexString string

 	promlogConfig promlog.Config
+
+	promqlEnableDelayedNameRemoval bool
 }

 // setFeatureListOptions sets the corresponding options from the featureList.
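The hunk below edits `setFeatureListOptions`, which walks every `--enable-feature` value and splits it on commas. A stripped-down sketch of that parsing pattern (hypothetical helper, not the actual function):

```go
package main

import (
	"fmt"
	"strings"
)

// parseFeatures expands repeated, comma-separated flag values such as
// --enable-feature=a,b --enable-feature=c into a set, mirroring the loop
// structure of setFeatureListOptions (simplified sketch; the real switch
// also flips config fields and logs per feature).
func parseFeatures(featureList []string) map[string]bool {
	enabled := map[string]bool{}
	for _, f := range featureList {
		for _, o := range strings.Split(f, ",") {
			switch o {
			case "":
				continue // empty entries are skipped
			case "otlp-write-receiver", "promql-per-step-stats", "old-ui":
				enabled[o] = true
			default:
				fmt.Printf("Unknown option for --enable-feature: %q\n", o)
			}
		}
	}
	return enabled
}

func main() {
	fmt.Println(parseFeatures([]string{"otlp-write-receiver,old-ui", "bogus"}))
}
```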
@@ -176,9 +181,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "remote-write-receiver":
-				c.web.EnableRemoteWriteReceiver = true
-				level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
 			case "otlp-write-receiver":
 				c.web.EnableOTLPWriteReceiver = true
 				level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")
@@ -194,18 +196,21 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 			case "extra-scrape-metrics":
 				c.scrape.ExtraMetrics = true
 				level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
-			case "new-service-discovery-manager":
-				c.enableNewSDManager = true
-				level.Info(logger).Log("msg", "Experimental service discovery manager")
-			case "agent":
-				agentMode = true
-				level.Info(logger).Log("msg", "Experimental agent mode enabled.")
+			case "metadata-wal-records":
+				c.scrape.AppendMetadata = true
+				level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
 			case "promql-per-step-stats":
 				c.enablePerStepStats = true
 				level.Info(logger).Log("msg", "Experimental per-step statistics reporting")
 			case "auto-gomaxprocs":
 				c.enableAutoGOMAXPROCS = true
 				level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+			case "auto-reload-config":
+				c.enableAutoReload = true
+				if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 {
+					c.autoReloadInterval, _ = model.ParseDuration("1s")
+				}
+				level.Info(logger).Log("msg", fmt.Sprintf("Enabled automatic configuration file reloading. Checking for configuration changes every %s.", c.autoReloadInterval))
 			case "auto-gomemlimit":
 				c.enableAutoGOMEMLIMIT = true
 				level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit")
@@ -225,16 +230,26 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 				config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 				config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 				level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+			case "ooo-native-histograms":
+				c.tsdb.EnableOOONativeHistograms = true
+				level.Info(logger).Log("msg", "Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
 			case "created-timestamp-zero-ingestion":
 				c.scrape.EnableCreatedTimestampZeroIngestion = true
 				// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
 				config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 				config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 				level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+			case "delayed-compaction":
+				c.tsdb.EnableDelayedCompaction = true
+				level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
+			case "promql-delayed-name-removal":
+				c.promqlEnableDelayedNameRemoval = true
+				level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
 			case "":
 				continue
-			case "promql-at-modifier", "promql-negative-offset":
-				level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
+			case "old-ui":
+				c.web.UseOldUI = true
+				level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.")
 			default:
 				level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
 			}
@@ -250,11 +265,6 @@ func main() {
 		runtime.SetMutexProfileFraction(20)
 	}

-	var (
-		oldFlagRetentionDuration model.Duration
-		newFlagRetentionDuration model.Duration
-	)
-
 	// Unregister the default GoCollector, and reregister with our defaults.
 	if prometheus.Unregister(collectors.NewGoCollector()) {
 		prometheus.MustRegister(
@@ -287,8 +297,11 @@ func main() {
 	a.Flag("config.file", "Prometheus configuration file path.").
 		Default("prometheus.yml").StringVar(&cfg.configFile)

-	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
-		Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
+	a.Flag("config.auto-reload-interval", "Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes.").
+		Default("30s").SetValue(&cfg.autoReloadInterval)
+
+	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
+		Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)

 	a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
 		Default("0.9").FloatVar(&cfg.memlimitRatio)
@@ -302,7 +315,7 @@ func main() {
 		"Maximum duration before timing out read of the request, and closing idle connections.").
 		Default("5m").SetValue(&cfg.webTimeout)

-	a.Flag("web.max-connections", "Maximum number of simultaneous connections.").
+	a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners.").
 		Default("512").IntVar(&cfg.web.MaxConnections)

 	a.Flag("web.external-url",
@@ -322,9 +335,15 @@ func main() {
 	a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
 		Default("false").BoolVar(&cfg.web.EnableAdminAPI)

+	// TODO(bwplotka): Consider allowing those remote receive flags to be changed in config.
+	// See https://github.com/prometheus/prometheus/issues/14410
 	a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
 		Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)

+	supportedRemoteWriteProtoMsgs := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}
+	a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
+		Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
+
 	a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
 		Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
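The `rwProtoMsgFlagValue` above wires a custom value type into kingpin's `SetValue`. As a rough illustration of the general pattern, using the standard library's analogous `flag.Value` interface (the real type lives elsewhere in cmd/prometheus and differs in detail; the two message names are assumed from the remote-write spec):

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// protoMsgsValue accepts a restricted set of strings, mimicking how a
// flag like --web.remote-write-receiver.accepted-protobuf-messages can
// validate each value at parse time (illustrative only).
type protoMsgsValue struct{ msgs []string }

func (v *protoMsgsValue) String() string { return strings.Join(v.msgs, ",") }

func (v *protoMsgsValue) Set(s string) error {
	switch s {
	case "prometheus.WriteRequest", "io.prometheus.write.v2.Request":
		v.msgs = append(v.msgs, s) // repeatable: each -flag adds one value
		return nil
	}
	return fmt.Errorf("unsupported remote write proto message: %q", s)
}

func main() {
	var v protoMsgsValue
	flag.Var(&v, "accepted-protobuf-messages", "repeatable proto message flag")
	flag.Parse()
	fmt.Println("accepted:", v.String())
}
```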
@@ -355,11 +374,8 @@ func main() {
 		"Size at which to split the tsdb WAL segment files. Example: 100MB").
 		Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.WALSegmentSize)

-	serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
-		SetValue(&oldFlagRetentionDuration)
-
-	serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
-		SetValue(&newFlagRetentionDuration)
+	serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. If neither this flag nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
+		SetValue(&cfg.tsdb.RetentionDuration)

 	serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". Based on powers-of-2, so 1KB is 1024B.").
 		BytesVar(&cfg.tsdb.MaxBytes)
@@ -367,11 +383,6 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
 		Default("false").BoolVar(&cfg.tsdb.NoLockfile)

-	// TODO: Remove in Prometheus 3.0.
-	var b bool
-	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
-		Default("true").Hidden().BoolVar(&b)
-
 	serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
@@ -448,9 +459,6 @@ func main() {
 	serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
 		Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)

-	// TODO: Remove in Prometheus 3.0.
-	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
-
 	serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
 		Default("5m").SetValue(&cfg.lookbackDelta)
@@ -466,9 +474,11 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) + a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) + promlogflag.AddFlags(a, &cfg.promlogConfig) a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { @@ -494,6 +504,15 @@ func main() { os.Exit(1) } + if cfg.nameEscapingScheme != "" { + scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme) + if err != nil { + fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme) + os.Exit(1) + } + model.NameEscapingScheme = scheme + } + if agentMode && len(serverOnlyFlags) > 0 { fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags) os.Exit(3) @@ -514,7 +533,7 @@ func main() { localStoragePath = cfg.agentStoragePath } - cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress) + cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddresses[0]) if err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("parse external URL %q: %w", cfg.prometheusURL, err)) os.Exit(2) @@ -526,10 +545,6 @@ func main() { os.Exit(2) } - if *alertmanagerTimeout != "" { - level.Warn(logger).Log("msg", "The flag --alertmanager.timeout has no effect and will be removed in the future.") - } - // Throw error for invalid config before starting other components. var cfgFile *config.Config if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil { @@ -576,17 +591,6 @@ func main() { cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") if !agentMode { - // Time retention settings. - if oldFlagRetentionDuration != 0 { - level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.") - cfg.tsdb.RetentionDuration = oldFlagRetentionDuration - } - - // When the new flag is set it takes precedence. 
- if newFlagRetentionDuration != 0 { - cfg.tsdb.RetentionDuration = newFlagRetentionDuration - } - if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 { cfg.tsdb.RetentionDuration = defaultRetentionDuration level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) @@ -646,7 +650,7 @@ func main() { var ( localStorage = &readyStorage{stats: tsdb.NewDBStats()} scraper = &readyScrapeManager{} - remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper) + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) @@ -658,8 +662,8 @@ func main() { ctxScrape, cancelScrape = context.WithCancel(context.Background()) ctxNotify, cancelNotify = context.WithCancel(context.Background()) - discoveryManagerScrape discoveryManager - discoveryManagerNotify discoveryManager + discoveryManagerScrape *discovery.Manager + discoveryManagerNotify *discovery.Manager ) // Kubernetes client metrics are used by Kubernetes SD. @@ -679,47 +683,22 @@ func main() { os.Exit(1) } - if cfg.enableNewSDManager { - { - discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } + discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) + if discoveryManagerScrape == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager scrape") + os.Exit(1) + } - { - discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } - } else { - { - discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } - - { - discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } + discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) + if discoveryManagerNotify == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager notify") + os.Exit(1) } scrapeManager, err := scrape.NewManager( &cfg.scrape, log.With(logger, "component", "scrape 
manager"), + func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) }, fanoutStorage, prometheus.DefaultRegisterer, ) @@ -769,9 +748,10 @@ func main() { NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, // EnableAtModifier and EnableNegativeOffset have to be // always on for regular PromQL as of Prometheus v2.33. - EnableAtModifier: true, - EnableNegativeOffset: true, - EnablePerStepStats: cfg.enablePerStepStats, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: cfg.enablePerStepStats, + EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval, } queryEngine = promql.NewEngine(opts) @@ -960,15 +940,21 @@ func main() { }) } - listener, err := webHandler.Listener() + listeners, err := webHandler.Listeners() if err != nil { - level.Error(logger).Log("msg", "Unable to start web listener", "err", err) + level.Error(logger).Log("msg", "Unable to start web listeners", "err", err) + if err := queryEngine.Close(); err != nil { + level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + } os.Exit(1) } err = toolkit_web.Validate(*webConfig) if err != nil { level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err) + if err := queryEngine.Close(); err != nil { + level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + } os.Exit(1) } @@ -990,11 +976,14 @@ func main() { case <-cancel: reloadReady.Close() } + if err := queryEngine.Close(); err != nil { + level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + } return nil }, func(err error) { close(cancel) - webHandler.SetReady(false) + webHandler.SetReady(web.Stopping) }, ) } @@ -1084,6 +1073,15 @@ func main() { hup := make(chan os.Signal, 1) signal.Notify(hup, syscall.SIGHUP) cancel := make(chan struct{}) + + var checksum string + if cfg.enableAutoReload { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { + level.Error(logger).Log("msg", "Failed to generate initial checksum for configuration file", "err", err) + } + } + g.Add( func() error { <-reloadReady.C @@ -1093,6 +1091,12 @@ func main() { case <-hup: if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { level.Error(logger).Log("msg", "Error reloading config", "err", err) + } else if cfg.enableAutoReload { + if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { + checksum = currentChecksum + } else { + level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + } } case rc := <-webHandler.Reload(): if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { @@ -1100,6 +1104,30 @@ func main() { rc <- err } else { rc <- nil + if cfg.enableAutoReload { + if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { + checksum = currentChecksum + } else { + level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + } + } + } + case <-time.Tick(time.Duration(cfg.autoReloadInterval)): + if !cfg.enableAutoReload { + continue + } + currentChecksum, err := config.GenerateChecksum(cfg.configFile) + if err != nil { + level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + } else if currentChecksum == checksum { + continue + } + level.Info(logger).Log("msg", 
"Configuration file change detected, reloading the configuration.") + + if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + level.Error(logger).Log("msg", "Error reloading config", "err", err) + } else { + checksum = currentChecksum } case <-cancel: return nil @@ -1132,7 +1160,7 @@ func main() { reloadReady.Close() - webHandler.SetReady(true) + webHandler.SetReady(web.Ready) level.Info(logger).Log("msg", "Server is ready to receive web requests.") <-cancel return nil @@ -1257,7 +1285,7 @@ func main() { // Web handler. g.Add( func() error { - if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil { + if err := webHandler.Run(ctxWeb, listeners, *webConfig); err != nil { return fmt.Errorf("error starting web server: %w", err) } return nil @@ -1706,6 +1734,9 @@ type tsdbOptions struct { MaxExemplars int64 EnableMemorySnapshotOnShutdown bool EnableNativeHistograms bool + EnableDelayedCompaction bool + EnableOverlappingCompaction bool + EnableOOONativeHistograms bool } func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { @@ -1725,8 +1756,10 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { MaxExemplars: opts.MaxExemplars, EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown, EnableNativeHistograms: opts.EnableNativeHistograms, + EnableOOONativeHistograms: opts.EnableOOONativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, - EnableOverlappingCompaction: true, + EnableDelayedCompaction: opts.EnableDelayedCompaction, + EnableOverlappingCompaction: opts.EnableOverlappingCompaction, } } @@ -1759,11 +1792,38 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option } } -// discoveryManager interfaces the discovery manager. This is used to keep using -// the manager that restarts SD's on reload for a few releases until we feel -// the new manager can be enabled for all users. -type discoveryManager interface { - ApplyConfig(cfg map[string]discovery.Configs) error - Run() error - SyncCh() <-chan map[string][]*targetgroup.Group +// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum. +type rwProtoMsgFlagParser struct { + msgs *[]config.RemoteWriteProtoMsg +} + +func rwProtoMsgFlagValue(msgs *[]config.RemoteWriteProtoMsg) kingpin.Value { + return &rwProtoMsgFlagParser{msgs: msgs} +} + +// IsCumulative is used by kingpin to tell if it's an array or not. 
+func (p *rwProtoMsgFlagParser) IsCumulative() bool { + return true +} + +func (p *rwProtoMsgFlagParser) String() string { + ss := make([]string, 0, len(*p.msgs)) + for _, t := range *p.msgs { + ss = append(ss, string(t)) + } + return strings.Join(ss, ",") +} + +func (p *rwProtoMsgFlagParser) Set(opt string) error { + t := config.RemoteWriteProtoMsg(opt) + if err := t.Validate(); err != nil { + return err + } + for _, prev := range *p.msgs { + if prev == t { + return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs) + } + } + *p.msgs = append(*p.msgs, t) + return nil } diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 89c171bb5b..c16864cb8c 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -30,16 +30,23 @@ import ( "testing" "time" + "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/rules" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + const startupTime = 10 * time.Second var ( @@ -346,7 +353,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -364,7 +371,7 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentFailedStartupWithServerFlag(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) output := bytes.Buffer{} prom.Stderr = &output @@ -391,7 +398,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) { } func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -429,7 +436,7 @@ func TestModeSpecificFlags(t *testing.T) { args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"} if tc.mode == "agent" { - args = append(args, "--enable-feature=agent", "--config.file="+agentConfig) + args = append(args, "--agent", "--config.file="+agentConfig) } else { args = append(args, "--config.file="+promConfig) } @@ -499,3 +506,65 @@ func TestDocumentation(t *testing.T) { require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. 
Hint: run `make cli-documentation`.") } + +func TestRwProtoMsgFlagParser(t *testing.T) { + defaultOpts := config.RemoteWriteProtoMsgs{ + config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2, + } + + for _, tcase := range []struct { + args []string + expected []config.RemoteWriteProtoMsg + expectedErr error + }{ + { + args: nil, + expected: defaultOpts, + }, + { + args: []string{"--test-proto-msgs", "test"}, + expectedErr: errors.New("unknown remote write protobuf message test, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request"), + }, + { + args: []string{"--test-proto-msgs", "io.prometheus.write.v2.Request"}, + expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2}, + }, + { + args: []string{ + "--test-proto-msgs", "io.prometheus.write.v2.Request", + "--test-proto-msgs", "io.prometheus.write.v2.Request", + }, + expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request] already"), + }, + { + args: []string{ + "--test-proto-msgs", "io.prometheus.write.v2.Request", + "--test-proto-msgs", "prometheus.WriteRequest", + }, + expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2, config.RemoteWriteProtoMsgV1}, + }, + { + args: []string{ + "--test-proto-msgs", "io.prometheus.write.v2.Request", + "--test-proto-msgs", "prometheus.WriteRequest", + "--test-proto-msgs", "io.prometheus.write.v2.Request", + }, + expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request prometheus.WriteRequest] already"), + }, + } { + t.Run(strings.Join(tcase.args, ","), func(t *testing.T) { + a := kingpin.New("test", "") + var opt []config.RemoteWriteProtoMsg + a.Flag("test-proto-msgs", "").Default(defaultOpts.Strings()...).SetValue(rwProtoMsgFlagValue(&opt)) + + _, err := a.Parse(tcase.args) + if tcase.expectedErr != nil { + require.Error(t, err) + require.Equal(t, tcase.expectedErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tcase.expected, opt) + } + }) + } +} diff --git a/cmd/prometheus/scrape_failure_log_test.go b/cmd/prometheus/scrape_failure_log_test.go new file mode 100644 index 0000000000..8d86d719f9 --- /dev/null +++ b/cmd/prometheus/scrape_failure_log_test.go @@ -0,0 +1,193 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/prometheus/prometheus/util/testutil" +) + +func TestScrapeFailureLogFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + // Tracks the number of requests made to the mock server. + var requestCount atomic.Int32 + + // Starts a server that always returns HTTP 500 errors. + mockServerAddress := startGarbageServer(t, &requestCount) + + // Create a temporary directory for Prometheus configuration and logs. 
+ tempDir := t.TempDir() + + // Define file paths for the scrape failure log and Prometheus configuration. + // Like other files, the scrape failure log file should be relative to the + // config file. Therefore, we split the name we put in the file and the full + // path used to check the content of the file. + scrapeFailureLogFileName := "scrape_failure.log" + scrapeFailureLogFile := filepath.Join(tempDir, scrapeFailureLogFileName) + promConfigFile := filepath.Join(tempDir, "prometheus.yml") + + // Step 1: Set up an initial Prometheus configuration that globally + // specifies a scrape failure log file. + promConfig := fmt.Sprintf(` +global: + scrape_interval: 500ms + scrape_failure_log_file: %s + +scrape_configs: + - job_name: 'test_job' + static_configs: + - targets: ['%s'] +`, scrapeFailureLogFileName, mockServerAddress) + + err := os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to write Prometheus configuration file") + + // Start Prometheus with the generated configuration and a random port, enabling the lifecycle API. + port := testutil.RandomUnprivilegedPort(t) + params := []string{ + "-test.main", + "--config.file=" + promConfigFile, + "--storage.tsdb.path=" + filepath.Join(tempDir, "data"), + fmt.Sprintf("--web.listen-address=127.0.0.1:%d", port), + "--web.enable-lifecycle", + } + prometheusProcess := exec.Command(promPath, params...) + prometheusProcess.Stdout = os.Stdout + prometheusProcess.Stderr = os.Stderr + + err = prometheusProcess.Start() + require.NoError(t, err, "Failed to start Prometheus") + defer prometheusProcess.Process.Kill() + + // Wait until the mock server receives at least two requests from Prometheus. + require.Eventually(t, func() bool { + return requestCount.Load() >= 2 + }, 30*time.Second, 500*time.Millisecond, "Expected at least two requests to the mock server") + + // Verify that the scrape failures have been logged to the specified file. + content, err := os.ReadFile(scrapeFailureLogFile) + require.NoError(t, err, "Failed to read scrape failure log") + require.Contains(t, string(content), "server returned HTTP status 500 Internal Server Error", "Expected scrape failure log entry not found") + + // Step 2: Update the Prometheus configuration to remove the scrape failure + // log file setting. + promConfig = fmt.Sprintf(` +global: + scrape_interval: 1s + +scrape_configs: + - job_name: 'test_job' + static_configs: + - targets: ['%s'] +`, mockServerAddress) + + err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to update Prometheus configuration file") + + // Reload Prometheus with the updated configuration. + reloadPrometheus(t, port) + + // Count the number of lines in the scrape failure log file before any + // further requests. + preReloadLogLineCount := countLinesInFile(scrapeFailureLogFile) + + // Wait for at least two more requests to the mock server to ensure + // Prometheus continues scraping. + requestsBeforeReload := requestCount.Load() + require.Eventually(t, func() bool { + return requestCount.Load() >= requestsBeforeReload+2 + }, 30*time.Second, 500*time.Millisecond, "Expected two more requests to the mock server after configuration reload") + + // Ensure that no new lines were added to the scrape failure log file after + // the configuration change. 
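+	// (Scrape failures keep occurring after the reload; they are simply no
+	// longer written anywhere once the global scrape_failure_log_file
+	// setting is removed.)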
+ require.Equal(t, preReloadLogLineCount, countLinesInFile(scrapeFailureLogFile), "No new lines should be added to the scrape failure log file after removing the log setting") + + // Step 3: Re-add the scrape failure log file setting, but this time under + // scrape_configs, and reload Prometheus. + promConfig = fmt.Sprintf(` +global: + scrape_interval: 1s + +scrape_configs: + - job_name: 'test_job' + scrape_failure_log_file: %s + static_configs: + - targets: ['%s'] +`, scrapeFailureLogFileName, mockServerAddress) + + err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to update Prometheus configuration file") + + // Reload Prometheus with the updated configuration. + reloadPrometheus(t, port) + + // Wait for at least two more requests to the mock server and verify that + // new log entries are created. + postReloadLogLineCount := countLinesInFile(scrapeFailureLogFile) + requestsBeforeReAddingLog := requestCount.Load() + require.Eventually(t, func() bool { + return requestCount.Load() >= requestsBeforeReAddingLog+2 + }, 30*time.Second, 500*time.Millisecond, "Expected two additional requests after re-adding the log setting") + + // Confirm that new lines were added to the scrape failure log file. + require.Greater(t, countLinesInFile(scrapeFailureLogFile), postReloadLogLineCount, "New lines should be added to the scrape failure log file after re-adding the log setting") +} + +// reloadPrometheus sends a reload request to the Prometheus server to apply +// updated configurations. +func reloadPrometheus(t *testing.T, port int) { + resp, err := http.Post(fmt.Sprintf("http://127.0.0.1:%d/-/reload", port), "", nil) + require.NoError(t, err, "Failed to reload Prometheus") + require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status code when reloading Prometheus") +} + +// startGarbageServer sets up a mock server that returns a 500 Internal Server Error +// for all requests. It also increments the request count each time it's hit. +func startGarbageServer(t *testing.T, requestCount *atomic.Int32) string { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Inc() + w.WriteHeader(http.StatusInternalServerError) + })) + t.Cleanup(server.Close) + + parsedURL, err := url.Parse(server.URL) + require.NoError(t, err, "Failed to parse mock server URL") + + return parsedURL.Host +} + +// countLinesInFile counts and returns the number of lines in the specified file. +func countLinesInFile(filePath string) int { + data, err := os.ReadFile(filePath) + if err != nil { + return 0 // Return 0 if the file doesn't exist or can't be read. 
+ } + return bytes.Count(data, []byte{'\n'}) +} diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 400cae421a..16491f0416 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -85,7 +85,7 @@ func getCompatibleBlockDuration(maxBlockDuration int64) int64 { return blockDuration } -func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) { +func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool, customLabels map[string]string) (returnErr error) { blockDuration := getCompatibleBlockDuration(maxBlockDuration) mint = blockDuration * (mint / blockDuration) @@ -102,6 +102,8 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn nextSampleTs int64 = math.MaxInt64 ) + lb := labels.NewBuilder(labels.EmptyLabels()) + for t := mint; t <= maxt; t += blockDuration { tsUpper := t + blockDuration if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper { @@ -162,7 +164,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn l := labels.Labels{} p.Metric(&l) - if _, err := app.Append(0, l, *ts, v); err != nil { + lb.Reset(l) + for name, value := range customLabels { + lb.Set(name, value) + } + lbls := lb.Labels() + + if _, err := app.Append(0, lbls, *ts, v); err != nil { return fmt.Errorf("add sample: %w", err) } @@ -221,13 +229,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn return nil } -func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) { +func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) (err error) { p := textparse.NewOpenMetricsParser(input, nil) // Don't need a SymbolTable to get max and min timestamps. maxt, mint, err := getMinAndMaxTimestamps(p) if err != nil { return fmt.Errorf("getting min and max timestamp: %w", err) } - if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet); err != nil { + if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet, customLabels); err != nil { return fmt.Errorf("block creation: %w", err) } return nil diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index 32abfa46a8..b818194e86 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -92,6 +92,7 @@ func TestBackfill(t *testing.T) { Description string MaxSamplesInAppender int MaxBlockDuration time.Duration + Labels map[string]string Expected struct { MinTime int64 MaxTime int64 @@ -636,6 +637,49 @@ http_requests_total{code="400"} 1024 7199 }, }, }, + { + ToParse: `# HELP http_requests_total The total number of HTTP requests. 
+# TYPE http_requests_total counter +http_requests_total{code="200"} 1 1624463088.000 +http_requests_total{code="200"} 2 1629503088.000 +http_requests_total{code="200"} 3 1629863088.000 +# EOF +`, + IsOk: true, + Description: "Sample with external labels.", + MaxSamplesInAppender: 5000, + MaxBlockDuration: 2048 * time.Hour, + Labels: map[string]string{"cluster_id": "123", "org_id": "999"}, + Expected: struct { + MinTime int64 + MaxTime int64 + NumBlocks int + BlockDuration int64 + Samples []backfillSample + }{ + MinTime: 1624463088000, + MaxTime: 1629863088000, + NumBlocks: 2, + BlockDuration: int64(1458 * time.Hour / time.Millisecond), + Samples: []backfillSample{ + { + Timestamp: 1624463088000, + Value: 1, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + { + Timestamp: 1629503088000, + Value: 2, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + { + Timestamp: 1629863088000, + Value: 3, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + }, + }, + }, { ToParse: `# HELP rpc_duration_seconds A summary of the RPC duration in seconds. # TYPE rpc_duration_seconds summary @@ -689,7 +733,7 @@ after_eof 1 2 outputDir := t.TempDir() - err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration) + err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration, test.Labels) if !test.IsOk { require.Error(t, err, test.Description) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index e1d275e97e..4d4cf6c5db 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -62,6 +62,11 @@ import ( "github.com/prometheus/prometheus/util/documentcli" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + const ( successExitCode = 0 failureExitCode = 1 @@ -204,6 +209,7 @@ func main() { pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() testCmd := app.Command("test", "Unit testing.") + junitOutFile := testCmd.Flag("junit", "File path to store JUnit XML test results.").OpenFile(os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) testRulesCmd := testCmd.Command("rules", "Unit tests for rules.") testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings() testRulesFiles := testRulesCmd.Arg( @@ -235,14 +241,14 @@ func main() { tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.") dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() - dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() + dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). 
The sandbox is cleaned up at the end.").String() dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.") dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() - dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() + dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String() dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() @@ -252,6 +258,7 @@ func main() { importQuiet := importCmd.Flag("quiet", "Do not print created blocks.").Short('q').Bool() maxBlockDuration := importCmd.Flag("max-block-duration", "Maximum duration created blocks may span. Anything less than 2h is ignored.").Hidden().PlaceHolder("").Duration() openMetricsImportCmd := importCmd.Command("openmetrics", "Import samples from OpenMetrics input and produce TSDB blocks. Please refer to the storage docs for more details.") + openMetricsLabels := openMetricsImportCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times. 
Example --label=label_name=label_value").StringMap() importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String() importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String() importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.") @@ -323,8 +330,6 @@ func main() { noDefaultScrapePort = true case "": continue - case "promql-at-modifier", "promql-negative-offset": - fmt.Printf(" WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o) default: fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o) } @@ -378,7 +383,11 @@ func main() { os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p)) case testRulesCmd.FullCommand(): - os.Exit(RulesUnitTest( + results := io.Discard + if *junitOutFile != nil { + results = *junitOutFile + } + os.Exit(RulesUnitTestResult(results, promqltest.LazyLoaderOpts{ EnableAtModifier: true, EnableNegativeOffset: true, @@ -403,7 +412,7 @@ func main() { os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics))) // TODO(aSquare14): Work on adding support for custom block size. case openMetricsImportCmd.FullCommand(): - os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) + os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels)) case importRulesCmd.FullCommand(): os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) @@ -466,7 +475,7 @@ func (ls lintConfig) lintDuplicateRules() bool { return ls.all || ls.duplicateRules } -// Check server status - healthy & ready. +// CheckServerStatus - healthy & ready. func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error { if serverURL.Scheme == "" { serverURL.Scheme = "http" diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 78500fe937..9d891c32fd 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -31,12 +31,19 @@ import ( "testing" "time" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/promql/promqltest" ) +func init() { + // This can be removed when the default validation scheme in common is updated. 
+ model.NameValidationScheme = model.UTF8Validation +} + var promtoolPath = os.Args[0] func TestMain(m *testing.M) { @@ -549,3 +556,46 @@ func TestCheckRulesWithRuleFiles(t *testing.T) { require.Equal(t, lintErrExitCode, exitCode, "") }) } + +func TestTSDBDumpCommand(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + storage := promqltest.LoadedStorage(t, ` + load 1m + metric{foo="bar"} 1 2 3 + `) + t.Cleanup(func() { storage.Close() }) + + for _, c := range []struct { + name string + subCmd string + sandboxDirRoot string + }{ + { + name: "dump", + subCmd: "dump", + }, + { + name: "dump with sandbox dir root", + subCmd: "dump", + sandboxDirRoot: t.TempDir(), + }, + { + name: "dump-openmetrics", + subCmd: "dump-openmetrics", + }, + { + name: "dump-openmetrics with sandbox dir root", + subCmd: "dump-openmetrics", + sandboxDirRoot: t.TempDir(), + }, + } { + t.Run(c.name, func(t *testing.T) { + args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()} + cmd := exec.Command(promtoolPath, args...) + require.NoError(t, cmd.Run()) + }) + } +} diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 46246b672a..4c91d1d6fe 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -31,7 +31,7 @@ import ( "github.com/prometheus/prometheus/util/fmtutil" ) -// Push metrics to a prometheus remote write (for testing purpose only). +// PushMetrics to a prometheus remote write (for testing purpose only). func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int { addressURL, err := url.Parse(url.String()) if err != nil { @@ -101,6 +101,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin return successExitCode } +// TODO(bwplotka): Add PRW 2.0 support. func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool { metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels) if err != nil { @@ -116,7 +117,7 @@ func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]s // Encode the request body into snappy encoding. compressed := snappy.Encode(nil, raw) - err = client.Store(context.Background(), compressed, 0) + _, err = client.Store(context.Background(), compressed, 0) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) return false diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index ff511729ba..e2a8230902 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -69,13 +69,13 @@ tests: eval_time: 2m exp_samples: - labels: "test_histogram_repeat" - histogram: "{{count:2 sum:3 buckets:[2]}}" + histogram: "{{count:2 sum:3 counter_reset_hint:not_reset buckets:[2]}}" - expr: test_histogram_increase eval_time: 2m exp_samples: - labels: "test_histogram_increase" - histogram: "{{count:4 sum:5.6 buckets:[4]}}" + histogram: "{{count:4 sum:5.6 counter_reset_hint:not_reset buckets:[4]}}" # Ensure a value is stale as soon as it is marked as such. - expr: test_stale @@ -89,11 +89,11 @@ tests: # Ensure lookback delta is respected, when a value is missing. - expr: timestamp(test_missing) - eval_time: 5m + eval_time: 4m59s exp_samples: - value: 0 - expr: timestamp(test_missing) - eval_time: 5m1s + eval_time: 5m exp_samples: [] # Minimal test case to check edge case of a single sample. 
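# The expectation changes in this file (the lookback probes moving from 5m to
# 4m59s above, and the 61 -> 60 sample counts below) are consistent with
# PromQL range selectors and the lookback window becoming left-open: a sample
# sitting exactly at the lower boundary of the window is no longer included.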
@@ -113,7 +113,7 @@ tests: - expr: count_over_time(fixed_data[1h]) eval_time: 1h exp_samples: - - value: 61 + - value: 60 - expr: timestamp(fixed_data) eval_time: 1h exp_samples: @@ -183,7 +183,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 1m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 1m10s @@ -194,7 +194,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 2m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 2m59s999ms diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 2ed7244b1c..e6323d2944 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) { fmt.Fprintf(tw, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", meta.ULID, - getFormatedTime(meta.MinTime, humanReadable), - getFormatedTime(meta.MaxTime, humanReadable), + getFormattedTime(meta.MinTime, humanReadable), + getFormattedTime(meta.MaxTime, humanReadable), time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond, meta.Stats.NumSamples, meta.Stats.NumChunks, meta.Stats.NumSeries, - getFormatedBytes(b.Size(), humanReadable), + getFormattedBytes(b.Size(), humanReadable), ) } } -func getFormatedTime(timestamp int64, humanReadable bool) string { +func getFormattedTime(timestamp int64, humanReadable bool) string { if humanReadable { return time.Unix(timestamp/1000, 0).UTC().String() } return strconv.FormatInt(timestamp, 10) } -func getFormatedBytes(bytes int64, humanReadable bool) string { +func getFormattedBytes(bytes int64, humanReadable bool) string { if humanReadable { return units.Base2Bytes(bytes).String() } @@ -823,7 +823,7 @@ func checkErr(err error) int { return 0 } -func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int { +func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) int { inputFile, err := fileutil.OpenMmapFile(path) if err != nil { return checkErr(err) @@ -834,7 +834,7 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB return checkErr(fmt.Errorf("create output dir: %w", err)) } - return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration)) + return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration, customLabels)) } func displayHistogram(dataType string, datas []int, total int) { @@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) { fmt.Println() } -func generateBucket(min, max int) (start, end, step int) { - s := (max - min) / 10 +func generateBucket(minVal, maxVal int) (start, end, step int) { + s := (maxVal - minVal) / 10 step = 10 for step < s && step <= 10000 { step *= 10 } - start = min - min%step - end = max - max%step + step + start = minVal - minVal%step + end = maxVal - maxVal%step + step return } diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go index 75089b168b..90192e31ab 100644 --- a/cmd/promtool/tsdb_test.go +++ b/cmd/promtool/tsdb_test.go @@ -20,6 +20,7 @@ import ( "math" "os" "runtime" + "slices" "strings" "testing" "time" @@ -54,7 +55,7 @@ func TestGenerateBucket(t *testing.T) { } // getDumpedSamples dumps samples and returns them. 
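// When sandboxDirRoot is empty, dumpSamples falls back to its documented
// default, the database path (per the --sandbox-dir-root help text above).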
-func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string { +func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string { t.Helper() oldStdout := os.Stdout @@ -63,8 +64,8 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin err := dumpSamples( context.Background(), - path, - t.TempDir(), + databasePath, + sandboxDirRoot, mint, maxt, match, @@ -95,13 +96,15 @@ func TestTSDBDump(t *testing.T) { heavy_metric{foo="bar"} 5 4 3 2 1 heavy_metric{foo="foo"} 5 4 3 2 1 `) + t.Cleanup(func() { storage.Close() }) tests := []struct { - name string - mint int64 - maxt int64 - match []string - expectedDump string + name string + mint int64 + maxt int64 + sandboxDirRoot string + match []string + expectedDump string }{ { name: "default match", @@ -110,6 +113,14 @@ func TestTSDBDump(t *testing.T) { match: []string{"{__name__=~'(?s:.*)'}"}, expectedDump: "testdata/dump-test-1.prom", }, + { + name: "default match with sandbox dir root set", + mint: math.MinInt64, + maxt: math.MaxInt64, + sandboxDirRoot: t.TempDir(), + match: []string{"{__name__=~'(?s:.*)'}"}, + expectedDump: "testdata/dump-test-1.prom", + }, { name: "same matcher twice", mint: math.MinInt64, @@ -148,28 +159,51 @@ func TestTSDBDump(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match, formatSeriesSet) + dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSet) expectedMetrics, err := os.ReadFile(tt.expectedDump) require.NoError(t, err) expectedMetrics = normalizeNewLine(expectedMetrics) - // even though in case of one matcher samples are not sorted, the order in the cases above should stay the same. - require.Equal(t, string(expectedMetrics), dumpedMetrics) + // Sort both, because Prometheus does not guarantee the output order. 
+ require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) }) } } +func sortLines(buf string) string { + lines := strings.Split(buf, "\n") + slices.Sort(lines) + return strings.Join(lines, "\n") +} + func TestTSDBDumpOpenMetrics(t *testing.T) { storage := promqltest.LoadedStorage(t, ` load 1m my_counter{foo="bar", baz="abc"} 1 2 3 4 5 my_gauge{bar="foo", abc="baz"} 9 8 0 4 7 `) + t.Cleanup(func() { storage.Close() }) - expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom") - require.NoError(t, err) - expectedMetrics = normalizeNewLine(expectedMetrics) - dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) - require.Equal(t, string(expectedMetrics), dumpedMetrics) + tests := []struct { + name string + sandboxDirRoot string + }{ + { + name: "default match", + }, + { + name: "default match with sandbox dir root set", + sandboxDirRoot: t.TempDir(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom") + require.NoError(t, err) + expectedMetrics = normalizeNewLine(expectedMetrics) + dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) + }) + } } func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { @@ -179,7 +213,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { dbDir := t.TempDir() // Import samples from OM format - err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour) + err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour, map[string]string{}) require.NoError(t, err) db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) @@ -188,7 +222,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { }) // Dump the blocks into OM format - dumpedMetrics := getDumpedSamples(t, dbDir, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + dumpedMetrics := getDumpedSamples(t, dbDir, "", math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) // Should get back the initial metrics. require.Equal(t, string(initialMetrics), dumpedMetrics) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 5451c5296c..7030635d1c 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -18,6 +18,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "os" "path/filepath" "sort" @@ -29,9 +30,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/grafana/regexp" "github.com/nsf/jsondiff" - "github.com/prometheus/common/model" "gopkg.in/yaml.v2" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" @@ -39,12 +41,18 @@ import ( "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/junitxml" ) // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. 
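//
// RulesUnitTestResult below is the writer-aware variant that additionally
// streams a JUnit XML report of the run. A rough usage sketch (hypothetical
// caller, mirroring what `promtool test rules --junit` wires up in main.go):
//
//	f, err := os.Create("junit.xml")
//	// handle err ...
//	exit := RulesUnitTestResult(f, promqltest.LazyLoaderOpts{}, nil, false, "testdata/unittest.yml")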
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { + return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...) +} + +func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { failed := false + junit := &junitxml.JUnitXML{} var run *regexp.Regexp if runStrings != nil { @@ -52,7 +60,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif } for _, f := range files { - if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil { + if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -64,25 +72,30 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif } fmt.Println() } + err := junit.WriteXML(results) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to write JUnit XML: %s\n", err) + } if failed { return failureExitCode } return successExitCode } -func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error { - fmt.Println("Unit Testing: ", filename) - +func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error { b, err := os.ReadFile(filename) if err != nil { + ts.Abort(err) return []error{err} } var unitTestInp unitTestFile if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil { + ts.Abort(err) return []error{err} } if err := resolveAndGlobFilepaths(filepath.Dir(filename), &unitTestInp); err != nil { + ts.Abort(err) return []error{err} } @@ -91,29 +104,38 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg } evalInterval := time.Duration(unitTestInp.EvaluationInterval) - + ts.Settime(time.Now().Format("2006-01-02T15:04:05")) // Giving number for groups mentioned in the file for ordering. // Lower number group should be evaluated before higher number group. groupOrderMap := make(map[string]int) for i, gn := range unitTestInp.GroupEvalOrder { if _, ok := groupOrderMap[gn]; ok { - return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)} + err := fmt.Errorf("group name repeated in evaluation order: %s", gn) + ts.Abort(err) + return []error{err} } groupOrderMap[gn] = i } // Testing. var errs []error - for _, t := range unitTestInp.Tests { + for i, t := range unitTestInp.Tests { if !matchesRun(t.TestGroupName, run) { continue } - + testname := t.TestGroupName + if testname == "" { + testname = fmt.Sprintf("unnamed#%d", i) + } + tc := ts.Case(testname) if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...) if ers != nil { + for _, e := range ers { + tc.Fail(e.Error()) + } errs = append(errs, ers...) 
} } diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 2dbd5a4e51..9bbac28e9f 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -14,11 +14,15 @@ package main import ( + "bytes" + "encoding/xml" + "fmt" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/promql/promqltest" + "github.com/prometheus/prometheus/util/junitxml" ) func TestRulesUnitTest(t *testing.T) { @@ -125,13 +129,59 @@ func TestRulesUnitTest(t *testing.T) { want: 0, }, } + reuseFiles := []string{} + reuseCount := [2]int{} for _, tt := range tests { + if (tt.queryOpts == promqltest.LazyLoaderOpts{ + EnableNegativeOffset: true, + } || tt.queryOpts == promqltest.LazyLoaderOpts{ + EnableAtModifier: true, + }) { + reuseFiles = append(reuseFiles, tt.args.files...) + reuseCount[tt.want] += len(tt.args.files) + } t.Run(tt.name, func(t *testing.T) { if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) } + t.Run("Junit xml output ", func(t *testing.T) { + var buf bytes.Buffer + if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 { + t.Errorf("RulesUnitTestResults() = %v, want 1", got) + } + var test junitxml.JUnitXML + output := buf.Bytes() + err := xml.Unmarshal(output, &test) + if err != nil { + fmt.Println("error in decoding XML:", err) + return + } + var total int + var passes int + var failures int + var cases int + total = len(test.Suites) + if total != len(reuseFiles) { + t.Errorf("JUnit output had %d testsuite elements; expected %d\n", total, len(reuseFiles)) + } + + for _, i := range test.Suites { + if i.FailureCount == 0 { + passes++ + } else { + failures++ + } + cases += len(i.Cases) + } + if total != passes+failures { + t.Errorf("JUnit output mismatch: Total testsuites (%d) does not equal the sum of passes (%d) and failures (%d).", total, passes, failures) + } + if cases < total { + t.Errorf("JUnit output had %d suites without test cases\n", total-cases) + } + }) } func TestRulesUnitTestRun(t *testing.T) { diff --git a/config/config.go b/config/config.go index 5615789789..8919f5fee0 100644 --- a/config/config.go +++ b/config/config.go @@ -37,6 +37,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/storage/remote/azuread" + "github.com/prometheus/prometheus/storage/remote/googleiam" ) var ( @@ -66,6 +67,11 @@ var ( } ) +const ( + LegacyValidationConfig = "legacy" + UTF8ValidationConfig = "utf8" +) + // Load parses the YAML input s into a Config. func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { cfg := &Config{} @@ -180,6 +186,7 @@ var ( // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), + ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, HTTPClientConfig: config.DefaultHTTPClientConfig, @@ -214,6 +221,7 @@ var ( // DefaultRemoteReadConfig is the default remote read configuration. 
DefaultRemoteReadConfig = RemoteReadConfig{ RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, HTTPClientConfig: config.DefaultHTTPClientConfig, FilterExternalLabels: true, } @@ -226,6 +234,9 @@ var ( DefaultExemplarsConfig = ExemplarsConfig{ MaxExemplars: 100000, } + + // DefaultOTLPConfig is the default OTLP configuration. + DefaultOTLPConfig = OTLPConfig{} ) // Config is the top-level configuration for Prometheus's config files. @@ -241,6 +252,7 @@ type Config struct { RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` + OTLPConfig OTLPConfig `yaml:"otlp,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -279,7 +291,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { jobNames := map[string]string{} for i, scfg := range c.ScrapeConfigs { - // We do these checks for library users that would not call Validate in + // We do these checks for library users that would not call validate in // Unmarshal. if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, err @@ -417,6 +429,8 @@ type GlobalConfig struct { RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"` // File to which PromQL queries are logged. QueryLogFile string `yaml:"query_log_file,omitempty"` + // File to which scrape failures are logged. + ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` // An uncompressed response body larger than this many bytes will cause the @@ -440,6 +454,8 @@ type GlobalConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` } // ScrapeProtocol represents supported protocol for scraping metrics. @@ -465,6 +481,7 @@ var ( PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" + UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", @@ -514,6 +531,7 @@ func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error { // SetDirectory joins any relative file paths with dir. func (c *GlobalConfig) SetDirectory(dir string) { c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -576,6 +594,7 @@ func (c *GlobalConfig) isZero() bool { c.EvaluationInterval == 0 && c.RuleQueryOffset == 0 && c.QueryLogFile == "" && + c.ScrapeFailureLogFile == "" && c.ScrapeProtocols == nil } @@ -619,6 +638,8 @@ type ScrapeConfig struct { ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` // Whether to convert a scraped classic histogram into a native histogram with custom buckets. ConvertClassicHistograms bool `yaml:"convert_classic_histograms,omitempty"` + // File to which scrape failures are logged. 
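+	// As with other file paths, a relative value is resolved against the
+	// directory of the configuration file (see SetDirectory), and an empty
+	// value inherits the global scrape_failure_log_file setting (see Validate).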
+ ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. @@ -652,6 +673,8 @@ type ScrapeConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -669,6 +692,7 @@ type ScrapeConfig struct { func (c *ScrapeConfig) SetDirectory(dir string) { c.ServiceDiscoveryConfigs.SetDirectory(dir) c.HTTPClientConfig.SetDirectory(dir) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -750,6 +774,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c.KeepDroppedTargets == 0 { c.KeepDroppedTargets = globalConfig.KeepDroppedTargets } + if c.ScrapeFailureLogFile == "" { + c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile + } if c.ScrapeProtocols == nil { c.ScrapeProtocols = globalConfig.ScrapeProtocols @@ -758,6 +785,19 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) } + switch globalConfig.MetricNameValidationScheme { + case LegacyValidationConfig: + case "", UTF8ValidationConfig: + if model.NameValidationScheme != model.UTF8Validation { + panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8") + } + default: + return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) + } + if c.MetricNameValidationScheme == "" { + c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme + } + return nil } @@ -1057,6 +1097,50 @@ func CheckTargetAddress(address model.LabelValue) error { return nil } +// RemoteWriteProtoMsg represents the known protobuf message for the remote write +// 1.0 and 2.0 specs. +type RemoteWriteProtoMsg string + +// Validate returns error if the given reference for the protobuf message is not supported. +func (s RemoteWriteProtoMsg) Validate() error { + switch s { + case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2: + return nil + default: + return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String()) + } +} + +type RemoteWriteProtoMsgs []RemoteWriteProtoMsg + +func (m RemoteWriteProtoMsgs) Strings() []string { + ret := make([]string, 0, len(m)) + for _, typ := range m { + ret = append(ret, string(typ)) + } + return ret +} + +func (m RemoteWriteProtoMsgs) String() string { + return strings.Join(m.Strings(), ", ") +} + +var ( + // RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf + // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/, + // which will eventually be deprecated. + // + // NOTE: This string is used for both HTTP header values and config value, so don't change + // this reference. 
+ RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest" + // RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf + // message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/ + // + // NOTE: This string is used for both HTTP header values and config value, so don't change + // this reference. + RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request" +) + // RemoteWriteConfig is the configuration for writing to remote storage. type RemoteWriteConfig struct { URL *config.URL `yaml:"url"` @@ -1066,6 +1150,9 @@ type RemoteWriteConfig struct { Name string `yaml:"name,omitempty"` SendExemplars bool `yaml:"send_exemplars,omitempty"` SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"` + // ProtobufMessage specifies the protobuf message to use against the remote + // receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/ + ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -1074,6 +1161,7 @@ type RemoteWriteConfig struct { MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"` + GoogleIAMConfig *googleiam.Config `yaml:"google_iam,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -1100,6 +1188,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err return err } + if err := c.ProtobufMessage.Validate(); err != nil { + return fmt.Errorf("invalid protobuf_message value: %w", err) + } + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. // We cannot make it a pointer as the parser panics for inlined pointer structs. // Thus we just do its validation here. @@ -1107,17 +1199,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err return err } - httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || - c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil + return validateAuthConfigs(c) +} - if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
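Because UnmarshalYAML now calls Validate on the configured message, a typo in protobuf_message fails at load time rather than at the remote endpoint. A small sketch of the check in isolation (not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// The second value reproduces the typo exercised by the bad testdata
	// file further down ("writet" instead of "write").
	for _, m := range []config.RemoteWriteProtoMsg{
		config.RemoteWriteProtoMsgV2,
		"io.prometheus.writet.v2.Request",
	} {
		fmt.Println(m, "->", m.Validate())
	}
}
```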
+// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam is configured. +func validateAuthConfigs(c *RemoteWriteConfig) error { + var authConfigured []string + if c.HTTPClientConfig.BasicAuth != nil { + authConfigured = append(authConfigured, "basic_auth") } - - if c.SigV4Config != nil && c.AzureADConfig != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") + if c.HTTPClientConfig.Authorization != nil { + authConfigured = append(authConfigured, "authorization") + } + if c.HTTPClientConfig.OAuth2 != nil { + authConfigured = append(authConfigured, "oauth2") + } + if c.SigV4Config != nil { + authConfigured = append(authConfigured, "sigv4") + } + if c.AzureADConfig != nil { + authConfigured = append(authConfigured, "azuread") + } + if c.GoogleIAMConfig != nil { + authConfigured = append(authConfigured, "google_iam") + } + if len(authConfigured) > 1 { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured) } - return nil } @@ -1136,7 +1244,7 @@ func validateHeadersForTracing(headers map[string]string) error { func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return fmt.Errorf("%s is a reserved header. It must not be changed", header) @@ -1184,13 +1292,20 @@ type MetadataConfig struct { MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"` } +const ( + // DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame the client allows. + // 50MB is the default. This is equivalent to ~100k full XOR chunks and average labelset. + DefaultChunkedReadLimit = 5e+7 +) + // RemoteReadConfig is the configuration for reading from remote storage. type RemoteReadConfig struct { - URL *config.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - ReadRecent bool `yaml:"read_recent,omitempty"` - Name string `yaml:"name,omitempty"` + URL *config.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + ChunkedReadLimit uint64 `yaml:"chunked_read_limit,omitempty"` + Headers map[string]string `yaml:"headers,omitempty"` + ReadRecent bool `yaml:"read_recent,omitempty"` + Name string `yaml:"name,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -1255,3 +1370,35 @@ func getGoGCEnv() int { } return DefaultRuntimeConfig.GoGC } + +// OTLPConfig is the configuration for writing to the OTLP endpoint. +type OTLPConfig struct { + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface.
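With google_iam joining the list, the consolidated check also tells the user exactly which mutually exclusive methods were set. A sketch (not from the patch; the URL and credentials are invented, and the sigv4 block is deliberately minimal):

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// basic_auth plus sigv4 passes the HTTP client config's own checks but
	// should trip validateAuthConfigs, which names both configured methods.
	_, err := config.Load(`
remote_write:
  - url: http://localhost:9090/api/v1/write
    basic_auth:
      username: user
      password: pass
    sigv4:
      region: us-east-1
`, false, log.NewNopLogger())
	fmt.Println(err)
}
```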
+func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultOTLPConfig + type plain OTLPConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + seen := map[string]struct{}{} + var err error + for i, attr := range c.PromoteResourceAttributes { + attr = strings.TrimSpace(attr) + if attr == "" { + err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute")) + continue + } + if _, exists := seen[attr]; exists { + err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr)) + continue + } + + seen[attr] = struct{}{} + c.PromoteResourceAttributes[i] = attr + } + return err +} diff --git a/config/config_test.go b/config/config_test.go index d84059b48f..66377f6879 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -16,6 +16,7 @@ package config import ( "crypto/tls" "encoding/json" + "fmt" "net/url" "os" "path/filepath" @@ -61,6 +62,11 @@ import ( "github.com/prometheus/prometheus/util/testutil" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + func mustParseURL(u string) *config.URL { parsed, err := url.Parse(u) if err != nil { @@ -77,14 +83,16 @@ const ( globLabelNameLengthLimit = 200 globLabelValueLengthLimit = 200 globalGoGC = 42 + globScrapeFailureLogFile = "testdata/fail.log" ) var expectedConf = &Config{ GlobalConfig: GlobalConfig{ - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EvaluationInterval: model.Duration(30 * time.Second), - QueryLogFile: "", + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EvaluationInterval: model.Duration(30 * time.Second), + QueryLogFile: "testdata/query.log", + ScrapeFailureLogFile: globScrapeFailureLogFile, ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"), @@ -108,9 +116,10 @@ var expectedConf = &Config{ RemoteWriteConfigs: []*RemoteWriteConfig{ { - URL: mustParseURL("http://remote1/push"), - RemoteTimeout: model.Duration(30 * time.Second), - Name: "drop_expensive", + URL: mustParseURL("http://remote1/push"), + ProtobufMessage: RemoteWriteProtoMsgV1, + RemoteTimeout: model.Duration(30 * time.Second), + Name: "drop_expensive", WriteRelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"__name__"}, @@ -137,11 +146,12 @@ var expectedConf = &Config{ }, }, { - URL: mustParseURL("http://remote2/push"), - RemoteTimeout: model.Duration(30 * time.Second), - QueueConfig: DefaultQueueConfig, - MetadataConfig: DefaultMetadataConfig, - Name: "rw_tls", + URL: mustParseURL("http://remote2/push"), + ProtobufMessage: RemoteWriteProtoMsgV2, + RemoteTimeout: model.Duration(30 * time.Second), + QueueConfig: DefaultQueueConfig, + MetadataConfig: DefaultMetadataConfig, + Name: "rw_tls", HTTPClientConfig: config.HTTPClientConfig{ TLSConfig: config.TLSConfig{ CertFile: filepath.FromSlash("testdata/valid_cert_file"), @@ -154,12 +164,19 @@ var expectedConf = &Config{ }, }, + OTLPConfig: OTLPConfig{ + PromoteResourceAttributes: []string{ + "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", + }, + }, + RemoteReadConfigs: []*RemoteReadConfig{ { - URL: mustParseURL("http://remote1/read"), - RemoteTimeout: model.Duration(1 * time.Minute), - ReadRecent: true, - Name: "default", + URL: mustParseURL("http://remote1/read"), + RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, + 
ReadRecent: true, + Name: "default", HTTPClientConfig: config.HTTPClientConfig{ FollowRedirects: true, EnableHTTP2: false, @@ -169,6 +186,7 @@ var expectedConf = &Config{ { URL: mustParseURL("http://remote3/read"), RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, ReadRecent: false, Name: "read_special", RequiredMatchers: model.LabelSet{"job": "special"}, @@ -200,6 +218,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: "testdata/fail_prom.log", MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -214,6 +233,15 @@ var expectedConf = &Config{ TLSConfig: config.TLSConfig{ MinVersion: config.TLSVersion(tls.VersionTLS10), }, + HTTPHeaders: &config.Headers{ + Headers: map[string]config.Header{ + "foo": { + Values: []string{"foobar"}, + Secrets: []config.Secret{"bar", "foo"}, + Files: []string{filepath.FromSlash("testdata/valid_password_file")}, + }, + }, + }, }, ServiceDiscoveryConfigs: discovery.Configs{ @@ -303,6 +331,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: 210, LabelValueLengthLimit: 210, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}, + ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.HTTPClientConfig{ BasicAuth: &config.BasicAuth{ @@ -400,6 +429,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -455,6 +485,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: "/metrics", Scheme: "http", @@ -488,6 +519,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -527,6 +559,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -566,6 +599,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -595,6 +629,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -632,6 +667,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: 
DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -666,6 +702,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -707,6 +744,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -738,6 +776,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -772,6 +811,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -799,6 +839,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -829,6 +870,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: "/federate", Scheme: DefaultScrapeConfig.Scheme, @@ -859,6 +901,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -889,6 +932,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -916,6 +960,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -951,6 +996,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -985,6 +1031,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: 
globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1016,6 +1063,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1046,6 +1094,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1080,6 +1129,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1117,6 +1167,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1173,6 +1224,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1200,6 +1252,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1238,6 +1291,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1282,6 +1336,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1317,6 +1372,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1346,6 +1402,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: 
globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1378,6 +1435,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1469,6 +1527,26 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) } +func TestOTLPSanitizeResourceAttributes(t *testing.T) { + t.Run("good config", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes) + }) + + t.Run("bad config", func(t *testing.T) { + _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger()) + require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`) + require.ErrorContains(t, err, `empty promoted OTel resource attribute`) + }) +} + func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. This tests whether parsing // an overwritten default field in the global config permanently changes the default. @@ -1501,7 +1579,7 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Len(t, matches, 22, "wrong number of secret matches found") + require.Len(t, matches, 24, "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } @@ -1798,7 +1876,11 @@ var expectedErrors = []struct { }, { filename: "remote_write_authorization_header.bad.yml", - errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`, + errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`, + }, + { + filename: "remote_write_wrong_msg.bad.yml", + errMsg: `invalid protobuf_message value: unknown remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`, }, { filename: "remote_write_url_missing.bad.yml", @@ -2007,6 +2089,10 @@ var expectedErrors = []struct { } func TestBadConfigs(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() for _, ee := range expectedErrors { _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) require.Error(t, err, "%s", ee.filename) @@ -2016,6 +2102,10 @@ func TestBadConfigs(t *testing.T) { } func TestBadStaticConfigsJSON(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() content, err := os.ReadFile("testdata/static_config.bad.json") require.NoError(t, err) var tg targetgroup.Group @@ -2024,6 +2114,10 @@ func TestBadStaticConfigsJSON(t *testing.T) { } func TestBadStaticConfigsYML(t 
*testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() content, err := os.ReadFile("testdata/static_config.bad.yml") require.NoError(t, err) var tg targetgroup.Group @@ -2268,3 +2362,52 @@ func TestScrapeConfigDisableCompression(t *testing.T) { require.False(t, got.ScrapeConfigs[0].EnableCompression) } + +func TestScrapeConfigNameValidationSettings(t *testing.T) { + model.NameValidationScheme = model.UTF8Validation + defer func() { + model.NameValidationScheme = model.LegacyValidation + }() + + tests := []struct { + name string + inputFile string + expectScheme string + }{ + { + name: "blank config implies default", + inputFile: "scrape_config_default_validation_mode", + expectScheme: "", + }, + { + name: "global setting implies local settings", + inputFile: "scrape_config_global_validation_mode", + expectScheme: "legacy", + }, + { + name: "local setting", + inputFile: "scrape_config_local_validation_mode", + expectScheme: "legacy", + }, + { + name: "local setting overrides global setting", + inputFile: "scrape_config_local_global_validation_mode", + expectScheme: "utf8", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + + require.NoError(t, err) + got := &Config{} + require.NoError(t, yaml.UnmarshalStrict(out, got)) + + require.Equal(t, tc.expectScheme, got.ScrapeConfigs[0].MetricNameValidationScheme) + }) + } +} diff --git a/config/reload.go b/config/reload.go new file mode 100644 index 0000000000..8be1b28d8a --- /dev/null +++ b/config/reload.go @@ -0,0 +1,92 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "path/filepath" + + "gopkg.in/yaml.v2" +) + +type ExternalFilesConfig struct { + RuleFiles []string `yaml:"rule_files"` + ScrapeConfigFiles []string `yaml:"scrape_config_files"` +} + +// GenerateChecksum generates a checksum of the YAML file and the files it references. 
+func GenerateChecksum(yamlFilePath string) (string, error) { + hash := sha256.New() + + yamlContent, err := os.ReadFile(yamlFilePath) + if err != nil { + return "", fmt.Errorf("error reading YAML file: %w", err) + } + _, err = hash.Write(yamlContent) + if err != nil { + return "", fmt.Errorf("error writing YAML file to hash: %w", err) + } + + var config ExternalFilesConfig + if err := yaml.Unmarshal(yamlContent, &config); err != nil { + return "", fmt.Errorf("error unmarshalling YAML: %w", err) + } + + dir := filepath.Dir(yamlFilePath) + + for i, file := range config.RuleFiles { + config.RuleFiles[i] = filepath.Join(dir, file) + } + for i, file := range config.ScrapeConfigFiles { + config.ScrapeConfigFiles[i] = filepath.Join(dir, file) + } + + files := map[string][]string{ + "r": config.RuleFiles, // "r" for rule files + "s": config.ScrapeConfigFiles, // "s" for scrape config files + } + + for _, prefix := range []string{"r", "s"} { + for _, pattern := range files[prefix] { + matchingFiles, err := filepath.Glob(pattern) + if err != nil { + return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err) + } + + for _, file := range matchingFiles { + // Write prefix to the hash ("r" or "s") followed by \0, then + // the file path. + _, err = hash.Write([]byte(prefix + "\x00" + file + "\x00")) + if err != nil { + return "", fmt.Errorf("error writing %q path to hash: %w", file, err) + } + + // Read and hash the content of the file. + content, err := os.ReadFile(file) + if err != nil { + return "", fmt.Errorf("error reading file %s: %w", file, err) + } + _, err = hash.Write(append(content, []byte("\x00")...)) + if err != nil { + return "", fmt.Errorf("error writing %q content to hash: %w", file, err) + } + } + } + } + + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/config/reload_test.go b/config/reload_test.go new file mode 100644 index 0000000000..f0f44f3588 --- /dev/null +++ b/config/reload_test.go @@ -0,0 +1,222 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGenerateChecksum(t *testing.T) { + tmpDir := t.TempDir() + + // Define paths for the temporary files. + yamlFilePath := filepath.Join(tmpDir, "test.yml") + ruleFilePath := filepath.Join(tmpDir, "rule_file.yml") + scrapeConfigFilePath := filepath.Join(tmpDir, "scrape_config.yml") + + // Define initial and modified content for the files. + originalRuleContent := "groups:\n- name: example\n rules:\n - alert: ExampleAlert" + modifiedRuleContent := "groups:\n- name: example\n rules:\n - alert: ModifiedAlert" + + originalScrapeConfigContent := "scrape_configs:\n- job_name: example" + modifiedScrapeConfigContent := "scrape_configs:\n- job_name: modified_example" + + // Define YAML content referencing the rule and scrape config files. 
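Typical use of the new helper is a one-liner. A sketch (not from the patch; the path is illustrative): the digest changes whenever the main file, any referenced rule or scrape-config file, or the set of glob matches changes, which is exactly what the tests below exercise.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Hex-encoded SHA-256 over the main config plus every file matched by
	// its rule_files and scrape_config_files entries.
	checksum, err := config.GenerateChecksum("/etc/prometheus/prometheus.yml")
	if err != nil {
		panic(err)
	}
	fmt.Println(checksum)
}
```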
+ yamlContent := ` +rule_files: + - rule_file.yml +scrape_config_files: + - scrape_config.yml +` + + // Write initial content to files. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Generate the original checksum. + originalChecksum := calculateChecksum(t, yamlFilePath) + + t.Run("Rule File Change", func(t *testing.T) { + // Modify the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(modifiedRuleContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Change", func(t *testing.T) { + // Modify the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(modifiedScrapeConfigContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Rule File Deletion", func(t *testing.T) { + // Delete the rule file. + require.NoError(t, os.Remove(ruleFilePath)) + + // Checksum should change. + deletedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, deletedChecksum) + + // Restore the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Deletion", func(t *testing.T) { + // Delete the scrape config file. + require.NoError(t, os.Remove(scrapeConfigFilePath)) + + // Checksum should change. + deletedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, deletedChecksum) + + // Restore the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Main File Change", func(t *testing.T) { + // Modify the main YAML file. + modifiedYamlContent := ` +global: + scrape_interval: 3s +rule_files: + - rule_file.yml +scrape_config_files: + - scrape_config.yml +` + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the main YAML file. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. 
+ revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Rule File Removed from YAML Config", func(t *testing.T) { + // Modify the YAML content to remove the rule file. + modifiedYamlContent := ` +scrape_config_files: + - scrape_config.yml +` + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the YAML content. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Removed from YAML Config", func(t *testing.T) { + // Modify the YAML content to remove the scrape config file. + modifiedYamlContent := ` +rule_files: + - rule_file.yml +` + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the YAML content. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Empty Rule File", func(t *testing.T) { + // Write an empty rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(""), 0o644)) + + // Checksum should change. + emptyChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, emptyChecksum) + + // Restore the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Empty Scrape Config File", func(t *testing.T) { + // Write an empty scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(""), 0o644)) + + // Checksum should change. + emptyChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, emptyChecksum) + + // Restore the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) +} + +// calculateChecksum generates a checksum for the given YAML file path. +func calculateChecksum(t *testing.T, yamlFilePath string) string { + checksum, err := GenerateChecksum(yamlFilePath) + require.NoError(t, err) + require.NotEmpty(t, checksum) + return checksum +} diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 184e6363ce..9eb7995432 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -8,6 +8,8 @@ global: label_limit: 30 label_name_length_limit: 200 label_value_length_limit: 200 + query_log_file: query.log + scrape_failure_log_file: fail.log # scrape_timeout is set to the global default (10s). 
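The otlp block added to conf.good.yml just below (and the sanitize testdata further down) exercises OTLPConfig.UnmarshalYAML from earlier. In isolation, a sketch (not from the patch) of the sanitization behavior:

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Mirrors the .good testdata: the padded " k8s.job.name " is trimmed.
	// A duplicated or empty attribute would make Load fail instead.
	cfg, err := config.Load(`
otlp:
  promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"]
`, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.OTLPConfig.PromoteResourceAttributes)
}
```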
external_labels: @@ -37,6 +39,7 @@ remote_write: key_file: valid_key_file - url: http://remote2/push + protobuf_message: io.prometheus.write.v2.Request name: rw_tls tls_config: cert_file: valid_cert_file @@ -44,6 +47,9 @@ remote_write: headers: name: value +otlp: + promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"] + remote_read: - url: http://remote1/read read_recent: true @@ -68,6 +74,7 @@ scrape_configs: # metrics_path defaults to '/metrics' # scheme defaults to 'http'. + scrape_failure_log_file: fail_prom.log file_sd_configs: - files: - foo/*.slow.json @@ -83,6 +90,12 @@ scrape_configs: my: label your: label + http_headers: + foo: + values: ["foobar"] + secrets: ["bar", "foo"] + files: ["valid_password_file"] + relabel_configs: - source_labels: [job, __meta_dns_name] regex: (.*)some-[regex]
diff --git a/config/testdata/jobname_dup.bad.yml b/config/testdata/jobname_dup.bad.yml index 0265493c30..d03cb0cf97 100644 --- a/config/testdata/jobname_dup.bad.yml +++ b/config/testdata/jobname_dup.bad.yml @@ -1,4 +1,6 @@ # Two scrape configs with the same job names are not allowed. +global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus - job_name: service-x
diff --git a/config/testdata/lowercase.bad.yml b/config/testdata/lowercase.bad.yml index 9bc9583341..6dd72e6476 100644 --- a/config/testdata/lowercase.bad.yml +++ b/config/testdata/lowercase.bad.yml @@ -1,3 +1,5 @@ +global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus relabel_configs:
diff --git a/config/testdata/otlp_sanitize_resource_attributes.bad.yml b/config/testdata/otlp_sanitize_resource_attributes.bad.yml new file mode 100644 index 0000000000..37ec5d1209 --- /dev/null +++ b/config/testdata/otlp_sanitize_resource_attributes.bad.yml @@ -0,0 +1,2 @@ +otlp: + promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""]
diff --git a/config/testdata/otlp_sanitize_resource_attributes.good.yml b/config/testdata/otlp_sanitize_resource_attributes.good.yml new file mode 100644 index 0000000000..67247e7743 --- /dev/null +++ b/config/testdata/otlp_sanitize_resource_attributes.good.yml @@ -0,0 +1,2 @@ +otlp: + promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"]
diff --git a/config/testdata/remote_write_wrong_msg.bad.yml b/config/testdata/remote_write_wrong_msg.bad.yml new file mode 100644 index 0000000000..0918309540 --- /dev/null +++ b/config/testdata/remote_write_wrong_msg.bad.yml @@ -0,0 +1,3 @@ +remote_write: + - url: localhost:9090 + protobuf_message: io.prometheus.writet.v2.Request # typo in 'write'
diff --git a/config/testdata/scrape_config_default_validation_mode.yml b/config/testdata/scrape_config_default_validation_mode.yml new file mode 100644 index 0000000000..96680d6438 --- /dev/null +++ b/config/testdata/scrape_config_default_validation_mode.yml @@ -0,0 +1,2 @@ +scrape_configs: + - job_name: prometheus
diff --git a/config/testdata/scrape_config_global_validation_mode.yml b/config/testdata/scrape_config_global_validation_mode.yml new file mode 100644 index 0000000000..e9b0618c70 --- /dev/null +++ b/config/testdata/scrape_config_global_validation_mode.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +scrape_configs: + - job_name: prometheus
diff --git a/config/testdata/scrape_config_local_global_validation_mode.yml b/config/testdata/scrape_config_local_global_validation_mode.yml new file mode 100644 index 0000000000..30b54834a5 ---
/dev/null +++ b/config/testdata/scrape_config_local_global_validation_mode.yml @@ -0,0 +1,5 @@ +global: + metric_name_validation_scheme: legacy +scrape_configs: + - job_name: prometheus + metric_name_validation_scheme: utf8 diff --git a/config/testdata/scrape_config_local_validation_mode.yml b/config/testdata/scrape_config_local_validation_mode.yml new file mode 100644 index 0000000000..90279ff081 --- /dev/null +++ b/config/testdata/scrape_config_local_validation_mode.yml @@ -0,0 +1,3 @@ +scrape_configs: + - job_name: prometheus + metric_name_validation_scheme: legacy diff --git a/console_libraries/menu.lib b/console_libraries/menu.lib deleted file mode 100644 index 199ebf9f48..0000000000 --- a/console_libraries/menu.lib +++ /dev/null @@ -1,82 +0,0 @@ -{{/* vim: set ft=html: */}} - -{{/* Navbar, should be passed . */}} -{{ define "navbar" }} - -{{ end }} - -{{/* LHS menu, should be passed . */}} -{{ define "menu" }} -
- -
-{{ end }} - -{{/* Helper, pass (args . path name) */}} -{{ define "_menuItem" }} - -{{ end }} - diff --git a/console_libraries/prom.lib b/console_libraries/prom.lib deleted file mode 100644 index d7d436f947..0000000000 --- a/console_libraries/prom.lib +++ /dev/null @@ -1,138 +0,0 @@ -{{/* vim: set ft=html: */}} -{{/* Load Prometheus console library JS/CSS. Should go in */}} -{{ define "prom_console_head" }} - - - - - - - - - - - - - -{{ end }} - -{{/* Top of all pages. */}} -{{ define "head" -}} - - - -{{ template "prom_console_head" }} - - -{{ template "navbar" . }} - -{{ template "menu" . }} -{{ end }} - -{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }} -{{ define "humanize" }}{{ humanize . }}{{ end }} -{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }} -{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }} -{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }} -{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }} -{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }} -{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }} -{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }} - -{{/* prom_query_drilldown (args expr suffix? renderTemplate?) -Displays the result of the expression, with a link to /graph for it. - -renderTemplate is the name of the template to use to render the value. -*/}} -{{ define "prom_query_drilldown" }} -{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }} -{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }} -{{ end }} - -{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&{{ end }}{{ end }}" - -{{ define "prom_right_table_head" }} -
- -{{ end }} -{{ define "prom_right_table_tail" }} -
-
-{{ end }} - -{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}} -{{ define "prom_right_table_job_head" }} - - {{ . }} - {{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }} - - - CPU - {{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }} - - - Memory - {{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }} - -{{ end }} - - -{{ define "prom_content_head" }} -
-
-{{ template "prom_graph_timecontrol" . }} -{{ end }} -{{ define "prom_content_tail" }} -
-
-{{ end }} - -{{ define "prom_graph_timecontrol" }} -
-
-
- -
-
- -
-
-
- - - -
-
-
- -
-{{ end }} - -{{/* Bottom of all pages. */}} -{{ define "tail" }} - - -{{ end }} diff --git a/consoles/index.html.example b/consoles/index.html.example deleted file mode 100644 index c725d30dea..0000000000 --- a/consoles/index.html.example +++ /dev/null @@ -1,28 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Overview

-

These are example consoles for Prometheus.

- -

These consoles expect exporters to have the following job labels:

- - - - - - - - - - - - - -
ExporterJob label
Node Exporternode
Prometheusprometheus
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-cpu.html b/consoles/node-cpu.html deleted file mode 100644 index 284ad738f2..0000000000 --- a/consoles/node-cpu.html +++ /dev/null @@ -1,60 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }} - -{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }} - - {{ .Labels.mode | title }} CPU - {{ .Value | printf "%.1f" }}% - -{{ end }} - Misc - - Processes Running - {{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }} - - - Processes Blocked - {{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }} - - - Forks - {{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - Context Switches - {{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - Interrupts - {{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - 1m Loadavg - {{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }} - - - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

- -

CPU Usage

-
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-disk.html b/consoles/node-disk.html deleted file mode 100644 index ffff41b797..0000000000 --- a/consoles/node-disk.html +++ /dev/null @@ -1,78 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Disks - -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }} - {{ .Labels.device }} - - Utilization - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} - - - Throughput - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} - - - Avg Read Time - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} - - - Avg Write Time - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} - -{{ end }} - - Filesystem Fullness - -{{ define "roughlyNearZero" }} -{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} -{{ end }} -{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} - - {{ .Labels.mountpoint }} - {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} - -{{ end }} - - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

- -

Disk I/O Utilization

-
- -

Filesystem Usage

-
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-overview.html b/consoles/node-overview.html deleted file mode 100644 index 4ae8984b99..0000000000 --- a/consoles/node-overview.html +++ /dev/null @@ -1,121 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - Overview - - User CPU - {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} - - - System CPU - {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} - - - Memory Total - {{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Memory Free - {{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Network - -{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Received - {{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} - - - {{ .Labels.device }} Transmitted - {{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} - -{{ end }} - - Disks - -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Utilization - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} - -{{ end }} -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Throughput - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} - -{{ end }} - - Filesystem Fullness - -{{ define "roughlyNearZero" }} -{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} -{{ end }} -{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} - - {{ .Labels.mountpoint }} - {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} - -{{ end }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

- -

CPU Usage

-
- - -

Disk I/O Utilization

-
- - -

Memory

-
- - -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node.html b/consoles/node.html deleted file mode 100644 index c1dfc1a891..0000000000 --- a/consoles/node.html +++ /dev/null @@ -1,35 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Node - {{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

-Node
-
-[table: Node | Up | CPU Used | Memory Available]
-{{ range query "up{job='node'}" | sortByLabel "instance" }}
-  {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}
-  | {{ if eq (. | value) 1.0 }}Yes{{ else }} class="alert-danger">No{{ end }}
-  | {{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}
-  | {{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}
-{{ else }}
-  No nodes found.
-{{ end }}
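Both node consoles shorten the raw instance address with `reReplaceAll` before displaying it. A hedged Go sketch of the equivalent substitution; the pattern is copied verbatim from the template, the rest is illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

// hostRe is the template's pattern: capture the scheme, the host, and
// an optional port; the replacement keeps only the host (group $2).
var hostRe = regexp.MustCompile(`(.*?://)([^:/]+?)(:\d+)?/.*`)

func hostname(instance string) string {
	return hostRe.ReplaceAllString(instance, "$2")
}

func main() {
	// Prints "node01.example.com"; a bare "host:port" instance that the
	// pattern does not match is passed through unchanged.
	fmt.Println(hostname("http://node01.example.com:9100/metrics"))
}
```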
- - -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/prometheus-overview.html b/consoles/prometheus-overview.html deleted file mode 100644 index 08e027de06..0000000000 --- a/consoles/prometheus-overview.html +++ /dev/null @@ -1,96 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Overview - - - CPU - {{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }} - - - Memory - {{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Version - {{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{. | first | label "version"}}{{end}} - - - - Storage - - - Ingested Samples - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }} - - - Head Series - {{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - Blocks Loaded - {{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - Rules - - - Evaluation Duration - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }} - - - Notification Latency - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }} - - - Notification Queue - {{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - HTTP Server - -{{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }} - - {{ .Labels.handler }} - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }} - -{{ end }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -
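The Evaluation Duration and Notification Latency rows in the table above derive a mean duration from a summary metric's `_sum` and `_count` counters: the ratio of the two rates over the same window is the average duration of the events in that window,

```latex
\overline{T}_{[5m]} = \frac{\operatorname{irate}(T_{\mathrm{sum}}[5m])}{\operatorname{irate}(T_{\mathrm{count}}[5m])}
```

and since numerator and denominator are both per-second rates, the seconds cancel, leaving seconds per event.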
-Prometheus Overview - {{ .Params.instance }}
-
-Ingested Samples
-[graph block: surrounding markup and inline script not recoverable from this extraction]
-
-HTTP Server
-[graph block: surrounding markup and inline script not recoverable from this extraction]
-{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/prometheus.html b/consoles/prometheus.html deleted file mode 100644 index e0d026376d..0000000000 --- a/consoles/prometheus.html +++ /dev/null @@ -1,34 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Prometheus - {{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

-Prometheus
-
-[table: Prometheus | Up | Ingested Samples | Memory]
-{{ range query "up{job='prometheus'}" | sortByLabel "instance" }}
-  {{ .Labels.instance }}
-  | {{ if eq (. | value) 1.0 }}Yes{{ else }} class="alert-danger">No{{ end }}
-  | {{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}
-  | {{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024") }}
-{{ else }}
-  No devices found.
-{{ end }}
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/discovery/discoverer_metrics_noop.go b/discovery/discoverer_metrics_noop.go index 638317ace1..4321204b6c 100644 --- a/discovery/discoverer_metrics_noop.go +++ b/discovery/discoverer_metrics_noop.go @@ -13,7 +13,7 @@ package discovery -// Create a dummy metrics struct, because this SD doesn't have any metrics. +// NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics. type NoopDiscovererMetrics struct{} var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil) diff --git a/discovery/discovery.go b/discovery/discovery.go index a5826f8176..a91faf6c86 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -39,7 +39,7 @@ type Discoverer interface { Run(ctx context.Context, up chan<- []*targetgroup.Group) } -// Internal metrics of service discovery mechanisms. +// DiscovererMetrics are internal metrics of service discovery mechanisms. type DiscovererMetrics interface { Register() error Unregister() @@ -56,7 +56,7 @@ type DiscovererOptions struct { HTTPClientOptions []config.HTTPClientOption } -// Metrics used by the "refresh" package. +// RefreshMetrics are used by the "refresh" package. // We define them here in the "discovery" package in order to avoid a cyclic dependency between // "discovery" and "refresh". type RefreshMetrics struct { @@ -64,17 +64,18 @@ type RefreshMetrics struct { Duration prometheus.Observer } -// Instantiate the metrics used by the "refresh" package. +// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package. type RefreshMetricsInstantiator interface { Instantiate(mech string) *RefreshMetrics } -// An interface for registering, unregistering, and instantiating metrics for the "refresh" package. -// Refresh metrics are registered and unregistered outside of the service discovery mechanism. -// This is so that the same metrics can be reused across different service discovery mechanisms. -// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are -// specific to that SD. However, doing so would also expose too many unused metrics on -// the Prometheus /metrics endpoint. +// RefreshMetricsManager is an interface for registering, unregistering, and +// instantiating metrics for the "refresh" package. Refresh metrics are +// registered and unregistered outside of the service discovery mechanism. This +// is so that the same metrics can be reused across different service discovery +// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to +// use const labels which are specific to that SD. However, doing so would also +// expose too many unused metrics on the Prometheus /metrics endpoint. type RefreshMetricsManager interface { DiscovererMetrics RefreshMetricsInstantiator @@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return staticDiscoverer(c), nil } -// No metrics are needed for this service discovery mechanism. +// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are +// needed for this service discovery mechanism. 
func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { return &NoopDiscovererMetrics{} } diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go index 5a90968f1b..52e8ce7b48 100644 --- a/discovery/eureka/client.go +++ b/discovery/eureka/client.go @@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode) } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 64155bfaed..516470b05a 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode) } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index e877657dba..3ea98c5db9 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -970,7 +970,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { }.Run(t) } -// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. +// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. // See https://github.com/prometheus/prometheus/issues/11305 for more details. func TestEndpointsDiscoveryUpdatePod(t *testing.T) { pod := &v1.Pod{ diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 7a70255c12..2ac65ef414 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -26,7 +26,6 @@ import ( "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -103,9 +102,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod return } - // TODO(brancz): use cache.Indexer to index endpoints by - // disv1beta1.LabelServiceName so this operation doesn't have to - // iterate over all endpoint objects. + // TODO(brancz): use cache.Indexer to index endpointslices by + // LabelServiceName so this operation doesn't have to iterate over all + // endpoint objects. 
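The updated TODO above asks for a `cache.Indexer` keyed by the EndpointSlice's service-name label, so that the linear scan in the loop below could become a map lookup. A hedged client-go sketch of such an index; the index name and wrapper function are illustrative, not the actual implementation:

```go
package kubernetesexample

import (
	disv1 "k8s.io/api/discovery/v1"
	"k8s.io/client-go/tools/cache"
)

// newSlicesByServiceInformer indexes EndpointSlices under
// "namespace/service-name" using the kubernetes.io/service-name label,
// so the slices backing a Service can be fetched without scanning the
// whole store.
func newSlicesByServiceInformer(lw cache.ListerWatcher) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(lw, &disv1.EndpointSlice{}, 0, cache.Indexers{
		"byServiceName": func(obj interface{}) ([]string, error) {
			es, ok := obj.(*disv1.EndpointSlice)
			if !ok {
				return nil, nil // not an EndpointSlice: nothing to index
			}
			if svc, ok := es.Labels[disv1.LabelServiceName]; ok {
				return []string{es.Namespace + "/" + svc}, nil
			}
			return nil, nil
		},
	})
}
```

A lookup would then be `informer.GetIndexer().ByIndex("byServiceName", "default/my-service")`.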
for _, obj := range e.endpointSliceStore.List() { esa, err := e.getEndpointSliceAdaptor(obj) if err != nil { @@ -241,8 +240,6 @@ func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAda switch endpointSlice := o.(type) { case *v1.EndpointSlice: return newEndpointSliceAdaptorFromV1(endpointSlice), nil - case *v1beta1.EndpointSlice: - return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil default: return nil, fmt.Errorf("received unexpected object: %v", o) } diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go index edd64fcb32..81243e2ce0 100644 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ b/discovery/kubernetes/endpointslice_adaptor.go @@ -16,7 +16,6 @@ package kubernetes import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -109,59 +108,6 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string { return v1.LabelServiceName } -// Adaptor for k8s.io/api/discovery/v1beta1. -type endpointSliceAdaptorV1Beta1 struct { - endpointSlice *v1beta1.EndpointSlice -} - -func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor { - return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice} -} - -func (e *endpointSliceAdaptorV1Beta1) get() interface{} { - return e.endpointSlice -} - -func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { - return e.endpointSlice.ObjectMeta -} - -func (e *endpointSliceAdaptorV1Beta1) name() string { - return e.endpointSlice.Name -} - -func (e *endpointSliceAdaptorV1Beta1) namespace() string { - return e.endpointSlice.Namespace -} - -func (e *endpointSliceAdaptorV1Beta1) addressType() string { - return string(e.endpointSlice.AddressType) -} - -func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor { - eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints)) - for i := 0; i < len(e.endpointSlice.Endpoints); i++ { - eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i])) - } - return eps -} - -func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor { - ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports)) - for i := 0; i < len(e.endpointSlice.Ports); i++ { - ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i])) - } - return ports -} - -func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string { - return e.endpointSlice.Labels -} - -func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string { - return v1beta1.LabelServiceName -} - type endpointSliceEndpointAdaptorV1 struct { endpoint v1.Endpoint } @@ -218,62 +164,6 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { return e.endpointConditions.Terminating } -type endpointSliceEndpointAdaptorV1beta1 struct { - endpoint v1beta1.Endpoint -} - -func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor { - return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint} -} - -func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string { - return e.endpoint.Addresses -} - -func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string { - return e.endpoint.Hostname -} - -func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string { - return e.endpoint.NodeName -} - -func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string { - 
return nil -} - -func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor { - return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions) -} - -func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference { - return e.endpoint.TargetRef -} - -func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string { - return e.endpoint.Topology -} - -type endpointSliceEndpointConditionsAdaptorV1beta1 struct { - endpointConditions v1beta1.EndpointConditions -} - -func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor { - return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions} -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool { - return e.endpointConditions.Ready -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool { - return e.endpointConditions.Serving -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool { - return e.endpointConditions.Terminating -} - type endpointSlicePortAdaptorV1 struct { endpointPort v1.EndpointPort } @@ -298,28 +188,3 @@ func (e *endpointSlicePortAdaptorV1) protocol() *string { func (e *endpointSlicePortAdaptorV1) appProtocol() *string { return e.endpointPort.AppProtocol } - -type endpointSlicePortAdaptorV1beta1 struct { - endpointPort v1beta1.EndpointPort -} - -func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor { - return &endpointSlicePortAdaptorV1beta1{endpointPort: port} -} - -func (e *endpointSlicePortAdaptorV1beta1) name() *string { - return e.endpointPort.Name -} - -func (e *endpointSlicePortAdaptorV1beta1) port() *int32 { - return e.endpointPort.Port -} - -func (e *endpointSlicePortAdaptorV1beta1) protocol() *string { - val := string(*e.endpointPort.Protocol) - return &val -} - -func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string { - return e.endpointPort.AppProtocol -} diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index 1ee3337193..de33c64b66 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -18,7 +18,6 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" ) func Test_EndpointSliceAdaptor_v1(t *testing.T) { @@ -48,31 +47,3 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol()) } } - -func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) { - endpointSlice := makeEndpointSliceV1beta1() - adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice) - - require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name()) - require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) - require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType())) - require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName]) - - for i, endpointAdaptor := range adaptor.endpoints() { - require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) - require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname()) - require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready()) 
- require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving()) - require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating()) - require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef()) - require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology()) - } - - for i, portAdaptor := range adaptor.ports() { - require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name()) - require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port()) - require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol()) - require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol()) - } -} diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index 6ef83081be..c7e99b0a00 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -114,62 +113,8 @@ func makeEndpointSliceV1() *v1.EndpointSlice { } } -func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { - return &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - Labels: map[string]string{ - v1beta1.LabelServiceName: "testendpoints", - }, - Annotations: map[string]string{ - "test.annotation": "test", - }, - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{ - { - Addresses: []string{"1.2.3.4"}, - Hostname: strptr("testendpoint1"), - }, { - Addresses: []string{"2.3.4.5"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - Serving: boolptr(true), - Terminating: boolptr(false), - }, - }, { - Addresses: []string{"3.4.5.6"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(false), - Serving: boolptr(true), - Terminating: boolptr(true), - }, - }, { - Addresses: []string{"4.5.6.7"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - Serving: boolptr(true), - Terminating: boolptr(false), - }, - TargetRef: &corev1.ObjectReference{ - Kind: "Node", - Name: "barbaz", - }, - }, - }, - } -} - func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.25.0") + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ discovery: n, @@ -249,71 +194,6 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { }.Run(t) } -func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "1.20.0") - - k8sDiscoveryTest{ - discovery: n, - beforeRun: func() { - obj := makeEndpointSliceV1beta1() - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: map[string]*targetgroup.Group{ - "endpointslice/default/testendpoints": { - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_endpoint_hostname": 
"testendpoint1", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, - Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_namespace": "default", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - }, - Source: "endpointslice/default/testendpoints", - }, - }, - }.Run(t) -} - func TestEndpointSliceDiscoveryAdd(t *testing.T) { obj := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -353,25 +233,25 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { PodIP: "1.2.3.4", }, } - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.20.0", obj) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ + obj := &v1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", Namespace: "default", }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ + AddressType: v1.AddressTypeIPv4, + Ports: []v1.EndpointPort{ { Name: strptr("testport"), Port: int32ptr(9000), Protocol: protocolptr(corev1.ProtocolTCP), }, }, - Endpoints: []v1beta1.Endpoint{ + Endpoints: []v1.Endpoint{ { Addresses: []string{"4.3.2.1"}, TargetRef: &corev1.ObjectReference{ @@ -379,13 +259,13 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { Name: "testpod", Namespace: "default", }, - Conditions: v1beta1.EndpointConditions{ + Conditions: v1.EndpointConditions{ Ready: boolptr(false), }, }, }, } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) + 
c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) }, expectedMaxItems: 1, expectedRes: map[string]*targetgroup.Group{ @@ -440,118 +320,34 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { } func TestEndpointSliceDiscoveryDelete(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { obj := makeEndpointSliceV1() - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) + c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { Source: "endpointslice/default/testendpoints", - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "", - "__meta_kubernetes_endpointslice_address_target_name": "", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", - "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", - "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - 
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, - Labels: map[model.LabelName]model.LabelValue{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - "__meta_kubernetes_namespace": "default", - }, }, }, }.Run(t) } func TestEndpointSliceDiscoveryUpdate(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{ - { - Addresses: []string{"1.2.3.4"}, - Hostname: strptr("testendpoint1"), - }, { - Addresses: []string{"2.3.4.5"}, - Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), - }, - }, - }, - } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) + obj := makeEndpointSliceV1() + obj.ObjectMeta.Labels = nil + obj.ObjectMeta.Annotations = nil + obj.Endpoints = obj.Endpoints[0:2] + c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ @@ -586,39 +382,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, - { - "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - 
"__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, }, Labels: model.LabelSet{ - "__meta_kubernetes_endpointslice_address_type": "IPv4", - "__meta_kubernetes_endpointslice_name": "testendpoints", - "__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints", - "__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true", - "__meta_kubernetes_endpointslice_annotation_test_annotation": "test", - "__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true", - "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpointslice_address_type": "IPv4", + "__meta_kubernetes_endpointslice_name": "testendpoints", + "__meta_kubernetes_namespace": "default", }, }, }, @@ -626,85 +394,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { } func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, afterStart: func() { - obj := &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testendpoints", - Namespace: "default", - }, - AddressType: v1beta1.AddressTypeIPv4, - Ports: []v1beta1.EndpointPort{ - { - Name: strptr("testport"), - Port: int32ptr(9000), - Protocol: protocolptr(corev1.ProtocolTCP), - }, - }, - Endpoints: []v1beta1.Endpoint{}, - } - c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) + obj := makeEndpointSliceV1() + obj.Endpoints = []v1.Endpoint{} + c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { - Targets: []model.LabelSet{ - { - "__address__": "1.2.3.4:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "", - "__meta_kubernetes_endpointslice_address_target_name": "", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", - "__meta_kubernetes_endpointslice_endpoint_node_name": "foobar", - "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", - "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "3.4.5.6:9000", - 
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - { - "__address__": "4.5.6.7:9000", - "__meta_kubernetes_endpointslice_address_target_kind": "Node", - "__meta_kubernetes_endpointslice_address_target_name": "barbaz", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", - "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", - "__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - }, - }, Labels: model.LabelSet{ "__meta_kubernetes_endpointslice_address_type": "IPv4", "__meta_kubernetes_endpointslice_name": "testendpoints", @@ -721,7 +422,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { } func TestEndpointSliceDiscoveryWithService(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, @@ -813,7 +514,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { } func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1()) + n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ discovery: n, diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 7b6366b257..4d91e7a460 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -24,7 +24,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -127,8 +126,6 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b switch ingress := o.(type) { case *v1.Ingress: ia = newIngressAdaptorFromV1(ingress) - case *v1beta1.Ingress: - ia = newIngressAdaptorFromV1beta1(ingress) default: level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o)) diff --git a/discovery/kubernetes/ingress_adaptor.go b/discovery/kubernetes/ingress_adaptor.go index d1a7b7f2a2..84281196b4 100644 --- a/discovery/kubernetes/ingress_adaptor.go +++ b/discovery/kubernetes/ingress_adaptor.go @@ -15,7 +15,6 @@ package kubernetes import ( v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -89,56 +88,3 @@ func (i *ingressRuleAdaptorV1) paths() []string { } func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host } - 
-// Adaptor for networking.k8s.io/v1beta1. -type ingressAdaptorV1Beta1 struct { - ingress *v1beta1.Ingress -} - -func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor { - return &ingressAdaptorV1Beta1{ingress: ingress} -} -func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } -func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } - -func (i *ingressAdaptorV1Beta1) tlsHosts() []string { - var hosts []string - for _, tls := range i.ingress.Spec.TLS { - hosts = append(hosts, tls.Hosts...) - } - return hosts -} - -func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor { - var rules []ingressRuleAdaptor - for _, rule := range i.ingress.Spec.Rules { - rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule)) - } - return rules -} - -type ingressRuleAdaptorV1Beta1 struct { - rule v1beta1.IngressRule -} - -func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor { - return &ingressRuleAdaptorV1Beta1{rule: rule} -} - -func (i *ingressRuleAdaptorV1Beta1) paths() []string { - rv := i.rule.IngressRuleValue - if rv.HTTP == nil { - return nil - } - paths := make([]string, len(rv.HTTP.Paths)) - for n, p := range rv.HTTP.Paths { - paths[n] = p.Path - } - return paths -} - -func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host } diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go index 8e6654c2cc..9bddfb1e14 100644 --- a/discovery/kubernetes/ingress_test.go +++ b/discovery/kubernetes/ingress_test.go @@ -20,7 +20,6 @@ import ( "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -89,60 +88,6 @@ func makeIngress(tls TLSMode) *v1.Ingress { return ret } -func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress { - ret := &v1beta1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testingress", - Namespace: "default", - Labels: map[string]string{"test/label": "testvalue"}, - Annotations: map[string]string{"test/annotation": "testannotationvalue"}, - }, - Spec: v1beta1.IngressSpec{ - IngressClassName: classString("testclass"), - TLS: nil, - Rules: []v1beta1.IngressRule{ - { - Host: "example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{ - {Path: "/"}, - {Path: "/foo"}, - }, - }, - }, - }, - { - // No backend config, ignored - Host: "nobackend.example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{}, - }, - }, - { - Host: "test.example.com", - IngressRuleValue: v1beta1.IngressRuleValue{ - HTTP: &v1beta1.HTTPIngressRuleValue{ - Paths: []v1beta1.HTTPIngressPath{{}}, - }, - }, - }, - }, - }, - } - - switch tls { - case TLSYes: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}} - case TLSMixed: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}} - case TLSWildcard: - ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}} - } - - return 
ret -} - func classString(v string) *string { return &v } @@ -212,20 +157,6 @@ func TestIngressDiscoveryAdd(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSNo) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSNo), - }.Run(t) -} - func TestIngressDiscoveryAddTLS(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) @@ -240,20 +171,6 @@ func TestIngressDiscoveryAddTLS(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSYes) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSYes), - }.Run(t) -} - func TestIngressDiscoveryAddMixed(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) @@ -268,20 +185,6 @@ func TestIngressDiscoveryAddMixed(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0") - - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - obj := makeIngressV1beta1(TLSMixed) - c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{}) - }, - expectedMaxItems: 1, - expectedRes: expectedTargetGroups("default", TLSMixed), - }.Run(t) -} - func TestIngressDiscoveryNamespaces(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) @@ -303,27 +206,6 @@ func TestIngressDiscoveryNamespaces(t *testing.T) { }.Run(t) } -func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) { - n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0") - - expected := expectedTargetGroups("ns1", TLSNo) - for k, v := range expectedTargetGroups("ns2", TLSNo) { - expected[k] = v - } - k8sDiscoveryTest{ - discovery: n, - afterStart: func() { - for _, ns := range []string{"ns1", "ns2"} { - obj := makeIngressV1beta1(TLSNo) - obj.Namespace = ns - c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{}) - } - }, - expectedMaxItems: 2, - expectedRes: expected, - }.Run(t) -} - func TestIngressDiscoveryOwnNamespace(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true}) diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index a8b6f85899..93ac65d8dc 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -25,8 +25,6 @@ import ( "github.com/prometheus/prometheus/util/strutil" - disv1beta1 "k8s.io/api/discovery/v1beta1" - "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -36,12 +34,10 @@ import ( apiv1 "k8s.io/api/core/v1" disv1 "k8s.io/api/discovery/v1" networkv1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -401,55 +397,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { switch d.role { case RoleEndpointSlice: - // Check "networking.k8s.io/v1" availability with retries. - // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkDiscoveryV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedIndexInformer - if v1Supported { - e := d.client.DiscoveryV1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) - } else { - e := d.client.DiscoveryV1beta1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{}) + e := d.client.DiscoveryV1().EndpointSlices(namespace) + elw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.Watch(ctx, options) + }, } + informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) s := d.client.CoreV1().Services(namespace) slw := &cache.ListWatch{ @@ -609,55 +572,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go svc.informer.Run(ctx.Done()) } case RoleIngress: - // Check "networking.k8s.io/v1" availability with retries. 
- // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkNetworkingV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedInformer - if v1Supported { - i := d.client.NetworkingV1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) - } else { - i := d.client.NetworkingV1beta1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled) + i := d.client.NetworkingV1().Ingresses(namespace) + ilw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.Watch(ctx, options) + }, } + informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) ingress := NewIngress( log.With(d.logger, "role", "ingress"), informer, @@ -720,20 +650,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) ( } } -func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // networking.k8s.io/v1 is available since Kubernetes v1.19 - // https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md - return semVer.Major() >= 1 && semVer.Minor() >= 19, nil -} - func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { nlw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { @@ -834,19 +750,6 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object } } } - case *disv1beta1.EndpointSlice: - for _, target := range e.Endpoints { - if target.TargetRef != nil { - switch target.TargetRef.Kind { - case "Pod": - if target.NodeName != nil { - nodes = append(nodes, *target.NodeName) - } - case "Node": - nodes = append(nodes, target.TargetRef.Name) - } - } - } default: return nil, fmt.Errorf("object 
is not an endpointslice") } @@ -882,21 +785,6 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb return informer } -func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25. - // discovery.k8s.io/v1 is available since Kubernetes v1.21 - // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 - return semVer.Major() >= 1 && semVer.Minor() >= 21, nil -} - func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name) diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index 552f8a4453..a026366502 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -46,7 +46,7 @@ func TestMain(m *testing.M) { // makeDiscovery creates a kubernetes.Discovery instance for testing. func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) { - return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...) + return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...) } // makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing. @@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) { // readResultWithTimeout reads all targetgroups from channel with timeout. // It merges targetgroups by source and sends the result to result channel. -func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { +func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { res := make(map[string]*targetgroup.Group) timeout := time.After(stopAfter) Loop: @@ -167,7 +167,7 @@ Loop: } res[tg.Source] = tg } - if len(res) == max { + if len(res) == maxGroups { // Reached max target groups we may get, break fast. break Loop } @@ -175,10 +175,10 @@ Loop: // Because we use queue, an object that is created then // deleted or updated may be processed only once. // So possibly we may skip events, timed out here. 
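The `max` to `maxGroups` rename in `readResultWithTimeout` above is presumably about Go 1.21's predeclared `max` builtin: a parameter named `max` shadows the builtin inside the function body. A minimal illustration:

```go
package main

import "fmt"

// With the parameter named max instead of maxGroups, the call below
// would not compile: the int parameter would shadow the predeclared
// max builtin (available since Go 1.21).
func report(maxGroups int) {
	fmt.Println(max(maxGroups, 10))
}

func main() { report(3) }
```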
- t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max) + t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups) break Loop case <-ctx.Done(): - t.Logf("stopped, got %d (max: %d) items", len(res), max) + t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups) break Loop } } @@ -285,40 +285,6 @@ func TestRetryOnError(t *testing.T) { } } -func TestCheckNetworkingV1Supported(t *testing.T) { - tests := []struct { - version string - wantSupported bool - wantErr bool - }{ - {version: "v1.18.0", wantSupported: false, wantErr: false}, - {version: "v1.18.1", wantSupported: false, wantErr: false}, - // networking v1 is supported since Kubernetes v1.19 - {version: "v1.19.0", wantSupported: true, wantErr: false}, - {version: "v1.20.0-beta.2", wantSupported: true, wantErr: false}, - // error patterns - {version: "", wantSupported: false, wantErr: true}, - {version: "<>", wantSupported: false, wantErr: true}, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.version, func(t *testing.T) { - clientset := fake.NewSimpleClientset() - fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery) - fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version} - supported, err := checkNetworkingV1Supported(clientset) - - if tc.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - require.Equal(t, tc.wantSupported, supported) - }) - } -} - func TestFailuresCountMetric(t *testing.T) { tests := []struct { role Role diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go deleted file mode 100644 index 6fc61485d1..0000000000 --- a/discovery/legacymanager/manager.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "reflect" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -type poolKey struct { - setName string - provider string -} - -// provider holds a Discoverer instance, its configuration and its subscribers. -type provider struct { - name string - d discovery.Discoverer - subs []string - config interface{} -} - -// NewManager is the Discovery Manager constructor. 
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager { - if logger == nil { - logger = log.NewNopLogger() - } - mgr := &Manager{ - logger: logger, - syncCh: make(chan map[string][]*targetgroup.Group), - targets: make(map[poolKey]map[string]*targetgroup.Group), - discoverCancel: []context.CancelFunc{}, - ctx: ctx, - updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), - registerer: registerer, - sdMetrics: sdMetrics, - } - for _, option := range options { - option(mgr) - } - - // Register the metrics. - // We have to do this after setting all options, so that the name of the Manager is set. - if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil { - mgr.metrics = metrics - } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) - return nil - } - - return mgr -} - -// Name sets the name of the manager. -func Name(n string) func(*Manager) { - return func(m *Manager) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.name = n - } -} - -// Manager maintains a set of discovery providers and sends each update to a map channel. -// Targets are grouped by the target set name. -type Manager struct { - logger log.Logger - name string - mtx sync.RWMutex - ctx context.Context - discoverCancel []context.CancelFunc - - // Some Discoverers(eg. k8s) send only the updates for a given target group - // so we use map[tg.Source]*targetgroup.Group to know which group to update. - targets map[poolKey]map[string]*targetgroup.Group - // providers keeps track of SD providers. - providers []*provider - // The sync channel sends the updates as a map where the key is the job value from the scrape config. - syncCh chan map[string][]*targetgroup.Group - - // How long to wait before sending updates to the channel. The variable - // should only be modified in unit tests. - updatert time.Duration - - // The triggerSend channel signals to the manager that new updates have been received from providers. - triggerSend chan struct{} - - // A registerer for all service discovery metrics. - registerer prometheus.Registerer - - metrics *discovery.Metrics - sdMetrics map[string]discovery.DiscovererMetrics -} - -// Run starts the background processing. -func (m *Manager) Run() error { - go m.sender() - <-m.ctx.Done() - m.cancelDiscoverers() - return m.ctx.Err() -} - -// SyncCh returns a read only channel used by all the clients to receive target updates. -func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { - return m.syncCh -} - -// ApplyConfig removes all running discovery providers and starts new ones using the provided config. -func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - for pk := range m.targets { - if _, ok := cfg[pk.setName]; !ok { - m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName) - } - } - m.cancelDiscoverers() - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil - - failedCount := 0 - for name, scfg := range cfg { - failedCount += m.registerProviders(scfg, name) - m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0) - } - m.metrics.FailedConfigs.Set(float64(failedCount)) - - for _, prov := range m.providers { - m.startProvider(m.ctx, prov) - } - - return nil -} - -// StartCustomProvider is used for sdtool. 
Only use this if you know what you're doing. -func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) { - p := &provider{ - name: name, - d: worker, - subs: []string{name}, - } - m.providers = append(m.providers, p) - m.startProvider(ctx, p) -} - -func (m *Manager) startProvider(ctx context.Context, p *provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) - ctx, cancel := context.WithCancel(ctx) - updates := make(chan []*targetgroup.Group) - - m.discoverCancel = append(m.discoverCancel, cancel) - - go p.d.Run(ctx, updates) - go m.updater(ctx, p, updates) -} - -func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { - for { - select { - case <-ctx.Done(): - return - case tgs, ok := <-updates: - m.metrics.ReceivedUpdates.Inc() - if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) - return - } - - for _, s := range p.subs { - m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) - } - - select { - case m.triggerSend <- struct{}{}: - default: - } - } - } -} - -func (m *Manager) sender() { - ticker := time.NewTicker(m.updatert) - defer ticker.Stop() - - for { - select { - case <-m.ctx.Done(): - return - case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. - select { - case <-m.triggerSend: - m.metrics.SentUpdates.Inc() - select { - case m.syncCh <- m.allGroups(): - default: - m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") - select { - case m.triggerSend <- struct{}{}: - default: - } - } - default: - } - } - } -} - -func (m *Manager) cancelDiscoverers() { - for _, c := range m.discoverCancel { - c() - } -} - -func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if _, ok := m.targets[poolKey]; !ok { - m.targets[poolKey] = make(map[string]*targetgroup.Group) - } - for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg - } - } -} - -func (m *Manager) allGroups() map[string][]*targetgroup.Group { - m.mtx.RLock() - defer m.mtx.RUnlock() - - tSets := map[string][]*targetgroup.Group{} - n := map[string]int{} - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) - } - } - for setName, v := range n { - m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) - } - return tSets -} - -// registerProviders returns a number of failed SD config. 
-func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int { - var ( - failed int - added bool - ) - add := func(cfg discovery.Config) { - for _, p := range m.providers { - if reflect.DeepEqual(cfg, p.config) { - p.subs = append(p.subs, setName) - added = true - return - } - } - typ := cfg.Name() - d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), - Metrics: m.sdMetrics[typ], - }) - if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) - failed++ - return - } - m.providers = append(m.providers, &provider{ - name: fmt.Sprintf("%s/%d", typ, len(m.providers)), - d: d, - config: cfg, - subs: []string{setName}, - }) - added = true - } - for _, cfg := range cfgs { - add(cfg) - } - if !added { - // Add an empty target group to force the refresh of the corresponding - // scrape pool and to notify the receiver that this target set has no - // current targets. - // It can happen because the combined set of SD configurations is empty - // or because we fail to instantiate all the SD configurations. - add(discovery.StaticConfig{{}}) - } - return failed -} - -// StaticProvider holds a list of target groups that never change. -type StaticProvider struct { - TargetGroups []*targetgroup.Group -} - -// Run implements the Worker interface. -func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { - // We still have to consider that the consumer exits right away in which case - // the context will be canceled. - select { - case ch <- sd.TargetGroups: - case <-ctx.Done(): - } - close(ch) -} diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go deleted file mode 100644 index a455a8e341..0000000000 --- a/discovery/legacymanager/manager_test.go +++ /dev/null @@ -1,1186 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "sort" - "strconv" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - client_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/testutil" -) - -func TestMain(m *testing.M) { - testutil.TolerantVerifyLeak(m) -} - -func newTestMetrics(t *testing.T, reg prometheus.Registerer) (*discovery.RefreshMetricsManager, map[string]discovery.DiscovererMetrics) { - refreshMetrics := discovery.NewRefreshMetrics(reg) - sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics) - require.NoError(t, err) - return &refreshMetrics, sdMetrics -} - -// TestTargetUpdatesOrder checks that the target updates are received in the expected order. 
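The deleted registerProviders above implements config coalescing: scrape jobs whose SD configurations compare equal under reflect.DeepEqual share a single provider, with each job recorded as a subscriber. A self-contained toy version of that rule (illustrative types, not the real Manager):

package main

import (
	"fmt"
	"reflect"
)

type provider struct {
	config interface{}
	subs   []string
}

// register reuses an existing provider when an identical config is seen,
// mirroring the DeepEqual check in the deleted code above.
func register(providers []*provider, cfg interface{}, job string) []*provider {
	for _, p := range providers {
		if reflect.DeepEqual(cfg, p.config) {
			p.subs = append(p.subs, job) // one discoverer, many subscribing jobs
			return providers
		}
	}
	return append(providers, &provider{config: cfg, subs: []string{job}})
}

func main() {
	var ps []*provider
	ps = register(ps, "static(foo:9090)", "prometheus")
	ps = register(ps, "static(foo:9090)", "prometheus2")
	fmt.Println(len(ps), ps[0].subs) // 1 [prometheus prometheus2]
}

The deleted manager tests, which continue below, exercised the update-ordering guarantees of this manager.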
-func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter - // Final targets array is ordered alphabetically by the name of the discoverer. - // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. - testCases := []struct { - title string - updates map[string][]update - expectedTargets [][]*targetgroup.Group - }{ - { - title: "Single TP no updates", - updates: map[string][]update{ - "tp1": {}, - }, - expectedTargets: nil, - }, - { - title: "Multiple TPs no updates", - updates: map[string][]update{ - "tp1": {}, - "tp2": {}, - "tp3": {}, - }, - expectedTargets: nil, - }, - { - title: "Single TP empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - }, - }, - { - title: "Multiple TPs empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{}, - interval: 200 * time.Millisecond, - }, - }, - "tp3": { - { - targetGroups: []targetgroup.Group{}, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - {}, - {}, - }, - }, - { - title: "Single TP initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - }, - }, - { - title: "Single TP initials followed by empty updates", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - 
expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - }, - }, - { - title: "Single TP initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 500 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - 
Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - { - title: "One TP initials arrive after other TP updates.", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 150 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 200 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - - { - title: "Single TP empty update in between", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 30 * time.Millisecond, - }, - { - 
targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 300 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - }, - }, - } - - for i, tc := range testCases { - tc := tc - t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - - var totalUpdatesCount int - for _, up := range tc.updates { - if len(up) > 0 { - totalUpdatesCount += len(up) - } - } - provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount) - - for _, up := range tc.updates { - go newMockDiscoveryProvider(up...).Run(ctx, provUpdates) - } - - for x := 0; x < totalUpdatesCount; x++ { - select { - case <-ctx.Done(): - t.Fatalf("%d: no update arrived within the timeout limit", x) - case tgs := <-provUpdates: - discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) - for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x]) - } - } - } - }) - } -} - -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { - t.Helper() - - // Need to sort by the groups's source as the received order is not guaranteed. 
- sort.Sort(byGroupSource(got)) - sort.Sort(byGroupSource(expected)) - - require.Equal(t, expected, got) -} - -func staticConfig(addrs ...string) discovery.StaticConfig { - var cfg discovery.StaticConfig - for i, addr := range addrs { - cfg = append(cfg, &targetgroup.Group{ - Source: strconv.Itoa(i), - Targets: []model.LabelSet{ - {model.AddressLabel: model.LabelValue(addr)}, - }, - }) - } - return cfg -} - -func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { - t.Helper() - if _, ok := tSets[poolKey]; !ok { - t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets) - } - - match := false - var mergedTargets string - for _, targetGroup := range tSets[poolKey] { - for _, l := range targetGroup.Targets { - mergedTargets = mergedTargets + " " + l.String() - if l.String() == label { - match = true - } - } - } - if match != present { - msg := "" - if !present { - msg = "not" - } - t.Fatalf("%q should %s be present in Targets labels: %q", label, msg, mergedTargets) - } -} - -func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false) -} - -func TestDiscovererConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - staticConfig("baz:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true) -} - -// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after -// removing all targets from the static_configs sends an update with empty targetGroups. 
-// This is required to signal the receiver that this target set has no current targets. -func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - discovery.StaticConfig{{}}, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - - pkey := poolKey{setName: "prometheus", provider: "static/0"} - targetGroups, ok := discoveryManager.targets[pkey] - if !ok { - t.Fatalf("'%v' should be present in target groups", pkey) - } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) - } -} - -func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, nil, reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - "prometheus2": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - if len(discoveryManager.providers) != 1 { - t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) - } -} - -func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { - originalConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - processedConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - cfgs := map[string]discovery.Configs{ - "prometheus": processedConfig, - } - discoveryManager.ApplyConfig(cfgs) - <-discoveryManager.SyncCh() - - for _, cfg := range cfgs { - require.Equal(t, originalConfig, cfg) - } -} - -type errorConfig struct{ err error } - -func (e errorConfig) Name() string { return "error" } -func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) { - return nil, e.err -} - -// NewDiscovererMetrics implements discovery.Config. 
-func (errorConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { - return &discovery.NoopDiscovererMetrics{} -} - -func TestGaugeFailedConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - errorConfig{fmt.Errorf("tests error 0")}, - errorConfig{fmt.Errorf("tests error 1")}, - errorConfig{fmt.Errorf("tests error 2")}, - }, - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) - if failedCount != 3 { - t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount) - } - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) - if failedCount != 0 { - t.Fatalf("Expected to get no failed config, got: %v", failedCount) - } -} - -func TestCoordinationWithReceiver(t *testing.T) { - updateDelay := 100 * time.Millisecond - - type expect struct { - delay time.Duration - tgs map[string][]*targetgroup.Group - } - - testCases := []struct { - title string - providers map[string]discovery.Discoverer - expected []expect - }{ - { - title: "Receiver should get all updates even when one provider closes its channel", - providers: map[string]discovery.Discoverer{ - "once1": &onceProvider{ - tgs: []*targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - "mock1": newMockDiscoveryProvider( - update{ - interval: 2 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - "mock1": { - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - { - title: "Receiver should get all updates even when the channel is blocked", - providers: map[string]discovery.Discoverer{ - "mock1": newMockDiscoveryProvider( - update{ - targetGroups: []targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - update{ - interval: 4 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - delay: 2 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - delay: 4 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - } - - for _, tc := range testCases { - tc := tc - 
t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - mgr := NewManager(ctx, nil, reg, sdMetrics) - require.NotNil(t, mgr) - mgr.updatert = updateDelay - go mgr.Run() - - for name, p := range tc.providers { - mgr.StartCustomProvider(ctx, name, p) - } - - for i, expected := range tc.expected { - time.Sleep(expected.delay) - select { - case <-ctx.Done(): - t.Fatalf("step %d: no update received in the expected timeframe", i) - case tgs, ok := <-mgr.SyncCh(): - if !ok { - t.Fatalf("step %d: discovery manager channel is closed", i) - } - if len(tgs) != len(expected.tgs) { - t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v", - i, len(tgs), len(expected.tgs), tgs, expected.tgs) - } - for k := range expected.tgs { - if _, ok := tgs[k]; !ok { - t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) - } - assertEqualGroups(t, tgs[k], expected.tgs[k]) - } - } - } - }) - } -} - -type update struct { - targetGroups []targetgroup.Group - interval time.Duration -} - -type mockdiscoveryProvider struct { - updates []update -} - -func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider { - tp := mockdiscoveryProvider{ - updates: updates, - } - return tp -} - -func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) { - for _, u := range tp.updates { - if u.interval > 0 { - select { - case <-ctx.Done(): - return - case <-time.After(u.interval): - } - } - tgs := make([]*targetgroup.Group, len(u.targetGroups)) - for i := range u.targetGroups { - tgs[i] = &u.targetGroups[i] - } - upCh <- tgs - } - <-ctx.Done() -} - -// byGroupSource implements sort.Interface so we can sort by the Source field. -type byGroupSource []*targetgroup.Group - -func (a byGroupSource) Len() int { return len(a) } -func (a byGroupSource) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source } - -// onceProvider sends updates once (if any) and closes the update channel. -type onceProvider struct { - tgs []*targetgroup.Group -} - -func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { - if len(o.tgs) > 0 { - ch <- o.tgs - } - close(ch) -} diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go deleted file mode 100644 index 955705394d..0000000000 --- a/discovery/legacymanager/registry.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
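The registry file being deleted here duplicated the reflection-based YAML registration that remains in the discovery package. For orientation, a hedged sketch of how an SD mechanism registers against the surviving API, using a hypothetical mysd package (the interface shape matches the errorConfig test double above):

package mysd

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

type Config struct{} // real implementations carry their YAML fields here

func (Config) Name() string { return "mysd" }

func (c Config) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return nil, nil // a real implementation returns its Discoverer here
}

func (Config) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &discovery.NoopDiscovererMetrics{}
}

func init() {
	// Makes `mysd_sd_configs` sections resolvable in scrape configurations.
	discovery.RegisterConfig(Config{})
}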
- -package legacymanager - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "gopkg.in/yaml.v2" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -const ( - configFieldPrefix = "AUTO_DISCOVERY_" - staticConfigsKey = "static_configs" - staticConfigsFieldName = configFieldPrefix + staticConfigsKey -) - -var ( - configNames = make(map[string]discovery.Config) - configFieldNames = make(map[reflect.Type]string) - configFields []reflect.StructField - - configTypesMu sync.Mutex - configTypes = make(map[reflect.Type]reflect.Type) - - emptyStructType = reflect.TypeOf(struct{}{}) - configsType = reflect.TypeOf(discovery.Configs{}) -) - -// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. -func RegisterConfig(config discovery.Config) { - registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config) -} - -func init() { - // N.B.: static_configs is the only Config type implemented by default. - // All other types are registered at init by their implementing packages. - elemTyp := reflect.TypeOf(&targetgroup.Group{}) - registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{}) -} - -func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) { - name := config.Name() - if _, ok := configNames[name]; ok { - panic(fmt.Sprintf("discovery: Config named %q is already registered", name)) - } - configNames[name] = config - - fieldName := configFieldPrefix + yamlKey // Field must be exported. - configFieldNames[elemType] = fieldName - - // Insert fields in sorted order. - i := sort.Search(len(configFields), func(k int) bool { - return fieldName < configFields[k].Name - }) - configFields = append(configFields, reflect.StructField{}) // Add empty field at end. - copy(configFields[i+1:], configFields[i:]) // Shift fields to the right. - configFields[i] = reflect.StructField{ // Write new field in place. - Name: fieldName, - Type: reflect.SliceOf(elemType), - Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`), - } -} - -func getConfigType(out reflect.Type) reflect.Type { - configTypesMu.Lock() - defer configTypesMu.Unlock() - if typ, ok := configTypes[out]; ok { - return typ - } - // Initial exported fields map one-to-one. - var fields []reflect.StructField - for i, n := 0, out.NumField(); i < n; i++ { - switch field := out.Field(i); { - case field.PkgPath == "" && field.Type != configsType: - fields = append(fields, field) - default: - fields = append(fields, reflect.StructField{ - Name: "_" + field.Name, // Field must be unexported. - PkgPath: out.PkgPath(), - Type: emptyStructType, - }) - } - } - // Append extra config fields on the end. - fields = append(fields, configFields...) - typ := reflect.StructOf(fields) - configTypes[out] = typ - return typ -} - -// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs -// that have a Configs field that should be inlined. 
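(Its deleted implementation follows.) As a usage sketch: config structs embed a discovery.Configs field and delegate their yaml.Unmarshaler to this helper, so every registered `<name>_sd_configs` key is decoded into that field. Assumed field names, mirroring the equivalent helper that survives in the discovery package:

// Sketch with assumed names; the discovery package retains an
// UnmarshalYAMLWithInlineConfigs helper with this shape.
type ScrapeConfig struct {
	JobName string `yaml:"job_name"`

	// Filled from static_configs, file_sd_configs, etc.
	ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
}

func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	return discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal)
}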
-func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error { - outVal := reflect.ValueOf(out) - if outVal.Kind() != reflect.Ptr { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outVal = outVal.Elem() - if outVal.Kind() != reflect.Struct { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outTyp := outVal.Type() - - cfgTyp := getConfigType(outTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields (defaults) to dynamic value. - var configs *discovery.Configs - for i, n := 0, outVal.NumField(); i < n; i++ { - if outTyp.Field(i).Type == configsType { - configs = outVal.Field(i).Addr().Interface().(*discovery.Configs) - continue - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - cfgVal.Field(i).Set(outVal.Field(i)) - } - if configs == nil { - return fmt.Errorf("discovery: Configs field not found in type: %T", out) - } - - // Unmarshal into dynamic value. - if err := unmarshal(cfgPtr.Interface()); err != nil { - return replaceYAMLTypeError(err, cfgTyp, outTyp) - } - - // Copy shared fields from dynamic value. - for i, n := 0, outVal.NumField(); i < n; i++ { - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - outVal.Field(i).Set(cfgVal.Field(i)) - } - - var err error - *configs, err = readConfigs(cfgVal, outVal.NumField()) - return err -} - -func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) { - var ( - configs discovery.Configs - targets []*targetgroup.Group - ) - for i, n := startField, structVal.NumField(); i < n; i++ { - field := structVal.Field(i) - if field.Kind() != reflect.Slice { - panic("discovery: internal error: field is not a slice") - } - for k := 0; k < field.Len(); k++ { - val := field.Index(k) - if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) { - key := configFieldNames[field.Type().Elem()] - key = strings.TrimPrefix(key, configFieldPrefix) - return nil, fmt.Errorf("empty or null section in %s", key) - } - switch c := val.Interface().(type) { - case *targetgroup.Group: - // Add index to the static config target groups for unique identification - // within scrape pool. - c.Source = strconv.Itoa(len(targets)) - // Coalesce multiple static configs into a single static config. - targets = append(targets, c) - case discovery.Config: - configs = append(configs, c) - default: - panic("discovery: internal error: slice element is not a Config") - } - } - } - if len(targets) > 0 { - configs = append(configs, discovery.StaticConfig(targets)) - } - return configs, nil -} - -// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs -// that have a Configs field that should be inlined. -func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) { - inVal := reflect.ValueOf(in) - for inVal.Kind() == reflect.Ptr { - inVal = inVal.Elem() - } - inTyp := inVal.Type() - - cfgTyp := getConfigType(inTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields to dynamic value. - var configs *discovery.Configs - for i, n := 0, inTyp.NumField(); i < n; i++ { - if inTyp.Field(i).Type == configsType { - configs = inVal.Field(i).Addr().Interface().(*discovery.Configs) - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. 
- } - cfgVal.Field(i).Set(inVal.Field(i)) - } - if configs == nil { - return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in) - } - - if err := writeConfigs(cfgVal, *configs); err != nil { - return nil, err - } - - return cfgPtr.Interface(), nil -} - -func writeConfigs(structVal reflect.Value, configs discovery.Configs) error { - targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group) - for _, c := range configs { - if sc, ok := c.(discovery.StaticConfig); ok { - *targets = append(*targets, sc...) - continue - } - fieldName, ok := configFieldNames[reflect.TypeOf(c)] - if !ok { - return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c) - } - field := structVal.FieldByName(fieldName) - field.Set(reflect.Append(field, reflect.ValueOf(c))) - } - return nil -} - -func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { - var e *yaml.TypeError - if errors.As(err, &e) { - oldStr := oldTyp.String() - newStr := newTyp.String() - for i, s := range e.Errors { - e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) - } - } - return err -} diff --git a/discovery/manager.go b/discovery/manager.go index 897d7d151c..cefa90a866 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -64,7 +64,7 @@ func (p *Provider) Config() interface{} { return p.config } -// Registers the metrics needed for SD mechanisms. +// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms. // Does not register the metrics for the Discovery Manager. // TODO(ptodev): Add ability to unregister the metrics? func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) { @@ -212,9 +212,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { m.metrics.FailedConfigs.Set(float64(failedCount)) var ( - wg sync.WaitGroup - // keep shows if we keep any providers after reload. - keep bool + wg sync.WaitGroup newProviders []*Provider ) for _, prov := range m.providers { @@ -228,13 +226,12 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { continue } newProviders = append(newProviders, prov) - // refTargets keeps reference targets used to populate new subs' targets + // refTargets keeps reference targets used to populate new subs' targets as they should be the same. var refTargets map[string]*targetgroup.Group prov.mu.Lock() m.targetsMtx.Lock() for s := range prov.subs { - keep = true refTargets = m.targets[poolKey{s, prov.name}] // Remove obsolete subs' targets. if _, ok := prov.newSubs[s]; !ok { @@ -267,7 +264,9 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { // While startProvider does pull the trigger, it may take some time to do so, therefore // we pull the trigger as soon as possible so that downstream managers can populate their state. // See https://github.com/prometheus/prometheus/pull/8639 for details. - if keep { + // This also helps making the downstream managers drop stale targets as soon as possible. + // See https://github.com/prometheus/prometheus/pull/13147 for details. 
+ if len(m.providers) > 0 { select { case m.triggerSend <- struct{}{}: default: @@ -288,7 +287,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D name: {}, }, } + m.mtx.Lock() m.providers = append(m.providers, p) + m.mtx.Unlock() m.startProvider(ctx, p) } @@ -393,8 +394,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { m.targets[poolKey] = make(map[string]*targetgroup.Group) } for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. + // Some Discoverers send nil target group so need to check for it to avoid panics. + if tg == nil { + continue + } + if len(tg.Targets) > 0 { m.targets[poolKey][tg.Source] = tg + } else { + // The target group is empty, drop the corresponding entry to avoid leaks. + // In case the group yielded targets before, allGroups() will take care of making consumers drop them. + delete(m.targets[poolKey], tg.Source) } } } @@ -403,19 +412,33 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { tSets := map[string][]*targetgroup.Group{} n := map[string]int{} + m.mtx.RLock() m.targetsMtx.Lock() - defer m.targetsMtx.Unlock() - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) + for _, p := range m.providers { + p.mu.RLock() + for s := range p.subs { + // Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers. + // See: https://github.com/prometheus/prometheus/issues/12858 for details. + if _, ok := tSets[s]; !ok { + tSets[s] = []*targetgroup.Group{} + n[s] = 0 + } + if tsets, ok := m.targets[poolKey{s, p.name}]; ok { + for _, tg := range tsets { + tSets[s] = append(tSets[s], tg) + n[s] += len(tg.Targets) + } + } } + p.mu.RUnlock() } + m.targetsMtx.Unlock() + m.mtx.RUnlock() + for setName, v := range n { m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) } + return tSets } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index be07edbdb4..831cefe514 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -939,11 +939,13 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { discoveryManager.ApplyConfig(c) // Original targets should be present as soon as possible. + // An empty list should be sent for prometheus2 to drop any stale targets syncedTargets = <-discoveryManager.SyncCh() mu.Unlock() - require.Len(t, syncedTargets, 1) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) require.Len(t, syncedTargets["prometheus"], 1) + require.Empty(t, syncedTargets["prometheus2"]) // prometheus2 configs should be ready on second sync. syncedTargets = <-discoveryManager.SyncCh() @@ -1049,8 +1051,8 @@ func TestDiscovererConfigs(t *testing.T) { } // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after -// removing all targets from the static_configs sends an update with empty targetGroups. -// This is required to signal the receiver that this target set has no current targets. +// removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update. 
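(The rewritten doc comment continues on the next line.) The contract described here is worth spelling out: consumers treat an empty group list for a job as an instruction to drop everything previously discovered for it. A toy consumer, with assumed names, not Prometheus code:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// applyUpdate folds one sync-channel payload into the consumer's state; an
// empty slice for a job drops all of that job's previously known targets.
func applyUpdate(current, update map[string][]*targetgroup.Group) {
	for job, groups := range update {
		if len(groups) == 0 {
			delete(current, job)
			continue
		}
		current[job] = groups
	}
}

func main() {
	current := map[string][]*targetgroup.Group{
		"prometheus2": {{Source: "stale"}},
	}
	applyUpdate(current, map[string][]*targetgroup.Group{"prometheus2": {}})
	fmt.Println(len(current)) // 0: the stale target group was dropped
}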
+// The update is required to signal the consumers that the previous targets should be dropped. func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1083,16 +1085,14 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() + require.Len(t, discoveryManager.targets, 1) p = pk("static", "prometheus", 1) targetGroups, ok := discoveryManager.targets[p] - require.True(t, ok, "'%v' should be present in target groups", p) - group, ok := targetGroups[""] - require.True(t, ok, "missing '' key in target groups %v", targetGroups) - - require.Empty(t, group.Targets, "Invalid number of targets.") - require.Len(t, syncedTargets, 1) - require.Len(t, syncedTargets["prometheus"], 1) - require.Nil(t, syncedTargets["prometheus"][0].Labels) + require.True(t, ok, "'%v' should be present in targets", p) + // Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436. + require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p) + require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.") + require.Empty(t, syncedTargets["prometheus"], 0) } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { @@ -1275,6 +1275,7 @@ func TestCoordinationWithReceiver(t *testing.T) { Targets: []model.LabelSet{{"__instance__": "1"}}, }, }, + "mock1": {}, }, }, { diff --git a/discovery/metrics_refresh.go b/discovery/metrics_refresh.go index d621165ced..ef49e591a3 100644 --- a/discovery/metrics_refresh.go +++ b/discovery/metrics_refresh.go @@ -17,7 +17,7 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// Metric vectors for the "refresh" package. +// RefreshMetricsVecs are metric vectors for the "refresh" package. // We define them here in the "discovery" package in order to avoid a cyclic dependency between // "discovery" and "refresh". type RefreshMetricsVecs struct { diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 11445092ee..68f6fe3ccc 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -19,6 +19,7 @@ import ( "net" "net/http" "net/url" + "sort" "strconv" "time" @@ -251,28 +252,26 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er } if d.matchFirstNetwork && len(networks) > 1 { - // Match user defined network - if containerNetworkMode.IsUserDefined() { - networkMode := string(containerNetworkMode) - networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]} - } else { - // Get first network if container network mode has "none" value. - // This case appears under certain condition: - // 1. Container created with network set to "--net=none". - // 2. Disconnect network "none". - // 3. Reconnect network with user defined networks. - var first string - for k, n := range networks { - if n != nil { - first = k - break - } + // Sort networks by name and take first non-nil network. 
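(The replacement code continues in the hunk below.) In effect, the new logic collects the non-nil networks, sorts their names, and keeps only the first, so the selected network no longer depends on Go's randomized map iteration order. A condensed, hypothetical restatement of that selection step:

package main

import (
	"fmt"
	"sort"

	"github.com/docker/docker/api/types/network"
)

// pickFirstNetwork is a hypothetical helper, not the patch itself: keep only
// the lexicographically first non-nil network for deterministic discovery.
func pickFirstNetwork(networks map[string]*network.EndpointSettings) map[string]*network.EndpointSettings {
	keys := make([]string, 0, len(networks))
	for k, n := range networks {
		if n != nil {
			keys = append(keys, k)
		}
	}
	if len(keys) == 0 {
		return networks
	}
	sort.Strings(keys)
	return map[string]*network.EndpointSettings{keys[0]: networks[keys[0]]}
}

func main() {
	nets := map[string]*network.EndpointSettings{
		"dockersd_private1": {},
		"dockersd_private":  {},
		"disconnected":      nil,
	}
	fmt.Println(len(pickFirstNetwork(nets))) // always 1, always "dockersd_private"
}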
+ keys := make([]string, 0, len(networks)) + for k, n := range networks { + if n != nil { + keys = append(keys, k) } - networks = map[string]*network.EndpointSettings{first: networks[first]} + } + if len(keys) > 0 { + sort.Strings(keys) + firstNetworkMode := keys[0] + firstNetwork := networks[firstNetworkMode] + networks = map[string]*network.EndpointSettings{firstNetworkMode: firstNetwork} } } for _, n := range networks { + if n == nil { + continue + } + var added bool for _, p := range c.Ports { diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index c108ddf582..398393a15a 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -60,9 +60,9 @@ host: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 6) + require.Len(t, tg.Targets, 8) - for i, lbls := range []model.LabelSet{ + expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", @@ -163,7 +163,43 @@ host: %s "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9104", }, - } { + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + } + sortFunc(expected) + sortFunc(tg.Targets) + + for i, lbls := range expected { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) @@ -202,13 +238,8 @@ host: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 9) + require.Len(t, tg.Targets, 13) - sortFunc := func(labelSets []model.LabelSet) { - sort.Slice(labelSets, func(i, j int) bool { - return labelSets[i]["__address__"] < labelSets[j]["__address__"] - }) - } expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", @@ -359,6 +390,70 @@ host: %s "__meta_docker_network_scope": "local", "__meta_docker_port_private": 
"9104", }, + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.21.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, } sortFunc(expected) @@ -370,3 +465,9 @@ host: %s }) } } + +func sortFunc(labelSets []model.LabelSet) { + 
sort.Slice(labelSets, func(i, j int) bool { + return labelSets[i]["__address__"] < labelSets[j]["__address__"] + }) +} diff --git a/discovery/moby/mock_test.go b/discovery/moby/mock_test.go index 3f35258c8f..7ef5cb07c3 100644 --- a/discovery/moby/mock_test.go +++ b/discovery/moby/mock_test.go @@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() { if len(query) == 2 { h := sha1.New() h.Write([]byte(query[1])) - // Avoing long filenames for Windows. + // Avoiding long filenames for Windows. f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10] } } diff --git a/discovery/moby/network.go b/discovery/moby/network.go index 794d2e607d..ea1ca66bc7 100644 --- a/discovery/moby/network.go +++ b/discovery/moby/network.go @@ -17,7 +17,7 @@ import ( "context" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/prometheus/prometheus/util/strutil" @@ -34,7 +34,7 @@ const ( ) func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) { - networks, err := client.NetworkList(ctx, types.NetworkListOptions{}) + networks, err := client.NetworkList(ctx, network.ListOptions{}) if err != nil { return nil, err } diff --git a/discovery/moby/testdata/dockerprom/containers/json.json b/discovery/moby/testdata/dockerprom/containers/json.json index ebfc56b6d5..33406bf9a4 100644 --- a/discovery/moby/testdata/dockerprom/containers/json.json +++ b/discovery/moby/testdata/dockerprom/containers/json.json @@ -228,5 +228,74 @@ "Networks": {} }, "Mounts": [] + }, + { + "Id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "Names": [ + "/dockersd_multi_networks" + ], + "Image": "mysql:5.7.29", + "ImageID": "sha256:16ae2f4625ba63a250462bedeece422e741de9f0caf3b1d89fd5b257aca80cd1", + "Command": "mysqld", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 3306, + "Type": "tcp" + }, + { + "PrivatePort": 33060, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysql", + "com.docker.compose.version": "2.2.2" + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "dockersd_private_none" + }, + "NetworkSettings": { + "Networks": { + "dockersd_private": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "EndpointID": "972d6807997369605ace863af58de6cb90c787a5bf2ffc4105662d393ae539b7", + "Gateway": "172.20.0.1", + "IPAddress": "172.20.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:14:00:02", + "DriverOpts": null + }, + "dockersd_private1": { + "IPAMConfig": {}, + "Links": null, + "Aliases": [ + "mysql", + "mysql", + "f9ade4b83199" + ], + "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "EndpointID": "91a98405344ee1cb7d977cafabe634837876651544b32da20a5e0155868e6f5f", + "Gateway": "172.21.0.1", + "IPAddress": "172.21.0.3", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:15:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] } ] diff --git a/discovery/moby/testdata/swarmprom/services.json b/discovery/moby/testdata/swarmprom/services.json index 72caa7a7f8..8f6c0793dd 100644 --- a/discovery/moby/testdata/swarmprom/services.json +++ 
b/discovery/moby/testdata/swarmprom/services.json @@ -224,7 +224,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/moby/testdata/swarmprom/tasks.json b/discovery/moby/testdata/swarmprom/tasks.json index 33d81f25ce..af5ff9fe28 100644 --- a/discovery/moby/testdata/swarmprom/tasks.json +++ b/discovery/moby/testdata/swarmprom/tasks.json @@ -973,7 +973,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/util.go b/discovery/util.go index 83cc640dd9..4e2a088518 100644 --- a/discovery/util.go +++ b/discovery/util.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// A utility to be used by implementations of discovery.Discoverer -// which need to manage the lifetime of their metrics. +// MetricRegisterer is used by implementations of discovery.Discoverer that need +// to manage the lifetime of their metrics. type MetricRegisterer interface { RegisterMetrics() error UnregisterMetrics() @@ -34,7 +34,7 @@ type metricRegistererImpl struct { var _ MetricRegisterer = &metricRegistererImpl{} -// Creates an instance of a MetricRegisterer. +// NewMetricRegisterer creates an instance of a MetricRegisterer. // Typically called inside the implementation of the NewDiscoverer() method. func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { return &metricRegistererImpl{ diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index c8af2f1587..2ab3396951 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -41,10 +41,10 @@ const ( uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_" uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname" uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn" - uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id" - uyuniLablelGroups = uyuniMetaLabelPrefix + "groups" - uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" - uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter" + uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id" + uyuniLabelGroups = uyuniMetaLabelPrefix + "groups" + uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" + uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter" uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module" uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path" uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme" @@ -270,10 +270,10 @@ func (d *Discovery) getEndpointLabels( model.AddressLabel: model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), - uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), - uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), - uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName), - uyuniLablelExporter: model.LabelValue(endpoint.ExporterName), + uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), + uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), + uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName), + uyuniLabelExporter: model.LabelValue(endpoint.ExporterName), 
uyuniLabelProxyModule: model.LabelValue(endpoint.Module), uyuniLabelMetricsPath: model.LabelValue(endpoint.Path), uyuniLabelScheme: model.LabelValue(scheme), diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 1fc032d09b..32b7039bd3 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -15,24 +15,25 @@ The Prometheus monitoring server | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | | --version | Show application version. | | | --config.file | Prometheus configuration file path. | `prometheus.yml` | -| --web.listen-address | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` | +| --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | +| --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | -| --web.max-connections | Maximum number of simultaneous connections. | `512` | +| --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` | | --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | --web.user-assets | Path to static asset directory, available at /user. | | | --web.enable-lifecycle | Enable shutdown and reload via HTTP request. | `false` | | --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` | | --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` | +| --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` | | --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` | | --web.console.libraries | Path to the console library directory. | `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | -| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` | +| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` | | --storage.tsdb.path | Base path for metrics storage. Use with server mode only. | `data/` | -| --storage.tsdb.retention | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | | -| --storage.tsdb.retention.time | How long to retain samples in storage. 
When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | +| --storage.tsdb.retention.time | How long to retain samples in storage. If neither this flag nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | | --storage.tsdb.retention.size | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | | | --storage.tsdb.no-lockfile | Do not create lockfile in data directory. Use with server mode only. | `false` | | --storage.tsdb.head-chunks-write-queue-size | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` | @@ -55,7 +56,8 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 443cd3f0cb..e48cede79c 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system. | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | | --experimental | Enable experimental commands. | -| --enable-feature | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | +| --enable-feature ... 
| Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | @@ -281,7 +281,7 @@ Run series query. | Flag | Description | | --- | --- | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | @@ -309,7 +309,7 @@ Run labels query. | --- | --- | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | @@ -338,7 +338,7 @@ Run queries against your Prometheus to analyze the usage pattern of certain metr | --type | Type of metric: histogram. | | | --duration | Time frame to analyze. | `1h` | | --time | Query time (RFC3339 or Unix timestamp), defaults to now. | | -| --match | Series selector. Can be specified multiple times. | | +| --match ... | Series selector. Can be specified multiple times. | | @@ -442,6 +442,15 @@ Unit testing. +#### Flags + +| Flag | Description | +| --- | --- | +| --junit | File path to store JUnit XML test results. | + + + + ##### `promtool test rules` Unit tests for rules. @@ -452,7 +461,7 @@ Unit tests for rules. | Flag | Description | Default | | --- | --- | --- | -| --run | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | +| --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | | --diff | [Experimental] Print colored differential output between expected & received output. | `false` | @@ -566,10 +575,10 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | -| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | +| --sandbox-dir-root | Root directory where a sandbox directory will be created; this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | -| --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | +| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -593,10 +602,10 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | -| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | +| --sandbox-dir-root | Root directory where a sandbox directory will be created; this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | -| --match ... | Series selector. Can be specified multiple times.
| `{__name__=~'(?s:.*)'}` | @@ -632,6 +641,15 @@ Import samples from OpenMetrics input and produce TSDB blocks. Please refer to t +###### Flags + +| Flag | Description | +| --- | --- | +| --label | Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value | + + + + ###### Arguments | Argument | Description | Default | Required | diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index c03ed49715..096809397a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -70,9 +70,12 @@ global: # How frequently to evaluate rules. [ evaluation_interval: | default = 1m ] - - # Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received. - # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there's anomalies with scraping. + + # Offset the rule evaluation timestamp of this particular group by the + # specified duration into the past to ensure the underlying metrics have + # been received. Metric availability delays are more likely to occur when + # Prometheus is running as a remote write target, but can also occur when + # there's anomalies with scraping. [ rule_query_offset: | default = 0s ] # The labels to add to any time series or alerts when communicating with @@ -84,33 +87,39 @@ global: # Reloading the configuration will reopen the file. [ query_log_file: ] + # File to which scrape failures are logged. + # Reloading the configuration will reopen the file. + [ scrape_failure_log_file: ] + # An uncompressed response body larger than this many bytes will cause the # scrape to fail. 0 means no limit. Example: 100MB. # This is an experimental feature, this behaviour could # change or be removed in the future. [ body_size_limit: | default = 0 ] - # Per-scrape limit on number of scraped samples that will be accepted. + # Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] - # Per-scrape limit on number of labels that will be accepted for a sample. If - # more than this number of labels are present post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the number of labels that will be accepted per sample. If more + # than this number of labels are present on any sample post metric-relabeling, + # the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] - # Per-scrape limit on length of labels name that will be accepted for a sample. - # If a label name is longer than this number post metric-relabeling, the entire - # scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label name. If any label + # name in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label names are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] - # Per-scrape limit on length of labels value that will be accepted for a sample. - # If a label value is longer than this number post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. 
+ # Limit on the length (in bytes) of each individual label value. If any label + # value in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label values are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] - # Per-scrape config limit on number of unique targets that will be + # Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could @@ -121,6 +130,11 @@ global: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] + # Specifies the validation scheme for metric and label names. Either blank or + # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, + # and underscores. + [ metric_name_validation_scheme | default "utf8" ] + runtime: # Configure the Go garbage collector GOGC parameter # See: https://tip.golang.org/doc/gc-guide#GOGC @@ -152,6 +166,10 @@ alerting: remote_write: [ - ... ] +# Settings related to the OTLP receiver feature. +otlp: + [ promote_resource_attributes: [, ...] | default = [ ] ] + # Settings related to the remote read feature. remote_read: [ - ... ] @@ -253,9 +271,11 @@ params: # Sets the `Authorization` header on every scrape request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -298,6 +318,21 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + +# File to which scrape failures are logged. +# Reloading the configuration will reopen the file. +[ scrape_failure_log_file: ] # List of Azure service discovery configurations. azure_sd_configs: @@ -426,45 +461,54 @@ metric_relabel_configs: # change or be removed in the future. [ body_size_limit: | default = 0 ] -# Per-scrape limit on number of scraped samples that will be accepted. +# Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] -# Per-scrape limit on number of labels that will be accepted for a sample. If -# more than this number of labels are present post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the number of labels that will be accepted per sample. If more +# than this number of labels are present on any sample post metric-relabeling, +# the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] -# Per-scrape limit on length of labels name that will be accepted for a sample. -# If a label name is longer than this number post metric-relabeling, the entire -# scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label name.
If any label +# name in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label names are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] -# Per-scrape limit on length of labels value that will be accepted for a sample. -# If a label value is longer than this number post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label value. If any label +# value in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label values are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] -# Per-scrape config limit on number of unique targets that will be +# Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] -# Per-job limit on the number of targets dropped by relabeling +# Limit per scrape config on the number of targets dropped by relabeling # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] +# Specifies the validation scheme for metric and label names. Either blank or +# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and +# underscores. +[ metric_name_validation_scheme | default "utf8" ] + # Limit on total number of positive and negative buckets allowed in a single -# native histogram. If this is exceeded, the entire scrape will be treated as -# failed. 0 means no limit. +# native histogram. The resolution of a histogram with more buckets will be +# reduced until the number of buckets is within the limit. If the limit cannot +# be reached, the scrape will fail. +# 0 means no limit. [ native_histogram_bucket_limit: | default = 0 ] # Lower limit for the growth factor of one bucket to the next in each native # histogram. The resolution of a histogram with a lower growth factor will be -# reduced until it is within the limit. +# reduced as much as possible until it is within the limit. # To set an upper limit for the schema (equivalent to "scale" in OTel's # exponential histograms), use the following factor limits: # @@ -581,6 +625,18 @@ tls_config: # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] ``` ### `` @@ -637,11 +693,13 @@ subscription_id: # Authentication information used to authenticate to the Azure API. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information, currently not support by Azure. 
basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -672,6 +730,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -750,11 +820,13 @@ tags: # Authentication information used to authenticate to the consul server. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -785,6 +857,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -836,11 +920,13 @@ The following meta labels are available on targets during [relabeling](#relabel_ # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information, not currently supported by DigitalOcean. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -872,6 +958,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -930,6 +1028,18 @@ host: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -941,7 +1051,9 @@ tls_config: # The host to use if the container is in host networking mode. [ host_networking_host: | default = "localhost" ] -# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets. +# Sort all non-nil networks in ascending order based on network name and +# get the first network if the container has multiple networks defined, +# thus avoiding collecting duplicate targets. 
[ match_first_network: | default = true ] # Optional filters to limit the discovery process to a subset of available @@ -958,11 +1070,13 @@ tls_config: # Authentication information used to authenticate to the Docker daemon. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1108,6 +1222,18 @@ host: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -1135,11 +1261,13 @@ role: # Authentication information used to authenticate to the Docker daemon. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1282,11 +1410,13 @@ filters: # Authentication information used to authenticate to the EC2 API. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information, currently not supported by AWS. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1298,7 +1428,7 @@ authorization: # `credentials_file`. [ credentials: ] # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. + # It is mutually exclusive with `credentials`. [ credentials_file: ] # Optional OAuth 2.0 configuration, currently not supported by AWS. @@ -1317,6 +1447,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1563,6 +1705,7 @@ tls_config: # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1594,6 +1737,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1782,12 +1937,14 @@ role: # Authentication information used to authenticate to the API server. 
# Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information, required when role is robot # Role hcloud does not support basic auth. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1820,6 +1977,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1879,11 +2048,13 @@ url: # Authentication information used to authenticate to the API server. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1914,6 +2085,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1959,12 +2142,14 @@ datacenter_id: # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information, required when using IONOS # Cloud username and password as authentication method. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -1997,6 +2182,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2129,6 +2326,8 @@ The `endpointslice` role discovers targets from existing endpointslices. For eac address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well. +The role requires the `discovery.k8s.io/v1` API version (available since Kubernetes v1.21). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. 
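As a side note on the `endpointslice` role documented just above: a minimal, hypothetical `scrape_config` using it could look like the sketch below. The job name and the keep-rule are illustrative choices, not part of this patch; only `role: endpointslice`, the API-version requirement, and the `__meta_kubernetes_endpointslice_*` labels come from the documentation text in this diff.

```yaml
scrape_configs:
  - job_name: "k8s-endpointslices"   # illustrative job name
    kubernetes_sd_configs:
      - role: endpointslice          # requires discovery.k8s.io/v1 (Kubernetes v1.21+)
    relabel_configs:
      # Example policy: keep only targets whose endpoint port is named "metrics".
      - source_labels: [__meta_kubernetes_endpointslice_port_name]
        regex: metrics
        action: keep
```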
@@ -2149,7 +2348,7 @@ Available meta labels: * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation. * `__meta_kubernetes_endpointslice_endpoint_hostname`: Hostname of the referenced endpoint. * `__meta_kubernetes_endpointslice_endpoint_node_name`: Name of the Node hosting the referenced endpoint. - * `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in (only available when using the `discovery.k8s.io/v1` API group). + * `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in. * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint. @@ -2162,6 +2361,8 @@ The `ingress` role discovers a target for each path of each ingress. This is generally useful for blackbox monitoring of an ingress. The address will be set to the host specified in the ingress spec. +The role requires the `networking.k8s.io/v1` API version (available since Kubernetes v1.19). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the ingress object. @@ -2195,11 +2396,13 @@ role: # Optional authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2231,6 +2434,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2322,6 +2537,18 @@ server: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -2329,11 +2556,13 @@ tls_config: # Authentication information used to authenticate to the Docker daemon. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2414,11 +2643,13 @@ See below for the configuration options for Lightsail discovery: # Authentication information used to authenticate to the Lightsail API. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. 
# Optional HTTP basic authentication information, currently not supported by AWS. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2430,7 +2661,7 @@ authorization: # `credentials_file`. [ credentials: ] # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. + # It is mutually exclusive with `credentials`. [ credentials_file: ] # Optional OAuth 2.0 configuration, currently not supported by AWS. @@ -2449,6 +2680,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2498,12 +2741,14 @@ The following meta labels are available on targets during [relabeling](#relabel_ # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Note: Linode APIv4 Token must be created with scopes: 'linodes:read_only', 'ips:read_only', and 'events:read_only' # Optional HTTP basic authentication information, not currently supported by Linode APIv4. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2538,6 +2783,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2599,9 +2856,11 @@ servers: # Sets the `Authorization` header on every request with the # configured username and password. # This is mutually exclusive with other authentication mechanisms. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2645,6 +2904,18 @@ tls_config: # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] ``` By default every app listed in Marathon will be scraped by Prometheus. If not all @@ -2709,11 +2980,13 @@ The following meta labels are available on targets during [relabeling](#relabel_ # Authentication information used to authenticate to the nomad server. # Note that `basic_auth`, `authorization` and `oauth2` options are # mutually exclusive. +# `username` and `username_file` are mutually exclusive. # `password` and `password_file` are mutually exclusive. # Optional HTTP basic authentication information. 
basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2744,6 +3017,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2892,9 +3177,11 @@ server: # Sets the `Authorization` header on every request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -2930,6 +3217,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3056,6 +3355,18 @@ tags_filter: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -3101,6 +3412,7 @@ password: # Optional HTTP basic authentication information, currently not supported by Uyuni. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -3132,6 +3444,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3178,11 +3502,13 @@ The following meta labels are available on targets during [relabeling](#relabel_ # Authentication information used to authenticate to the API server. # Note that `basic_auth` and `authorization` options are # mutually exclusive. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. # Optional HTTP basic authentication information, not currently supported by Vultr. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -3214,6 +3540,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] 
] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3259,12 +3597,16 @@ Initially, aside from the configured per-target labels, a target's `job` label is set to the `job_name` value of the respective scrape configuration. The `__address__` label is set to the `:` address of the target. After relabeling, the `instance` label is set to the value of `__address__` by default if -it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels -are set to the scheme and metrics path of the target respectively. The `__param_` -label is set to the value of the first passed URL parameter called ``. +it was not set during relabeling. + +The `__scheme__` and `__metrics_path__` labels +are set to the scheme and metrics path of the target respectively, as specified in `scrape_config`. + +The `__param_` +label is set to the value of the first passed URL parameter called ``, as defined in `scrape_config`. The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's -interval and timeout. +interval and timeout, as specified in `scrape_config`. Additional labels prefixed with `__meta_` may be available during the relabeling phase. They are set by the service discovery mechanism that provided @@ -3378,9 +3720,11 @@ through the `__alerts_path__` label. # Sets the `Authorization` header on every request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -3395,8 +3739,8 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] -# Optionally configures AWS's Signature Verification 4 signing process to -# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2. +# Optionally configures AWS's Signature Verification 4 signing process to sign requests. +# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam. # To use the default credentials from the AWS SDK, use `sigv4: {}`. sigv4: # The AWS region. If blank, the region from the default credentials chain @@ -3435,6 +3779,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3575,6 +3931,17 @@ this functionality. # The URL of the endpoint to send samples to. url: +# protobuf message to use when writing to the remote write endpoint. +# +# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which +# will be deprecated eventually. +# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former, +# by improving efficiency and sending metadata, created timestamp and native histograms by default. +# +# Before changing this value, consult with your remote storage provider (or test) what message it supports. 
+# Read more on https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request +[ protobuf_message: | default = prometheus.WriteRequest ] + # Timeout for requests to the remote write endpoint. [ remote_timeout: | default = 30s ] @@ -3596,20 +3963,23 @@ write_relabel_configs: [ send_exemplars: | default = false ] # Enables sending of native histograms, also known as sparse histograms, over remote write. +# For the `io.prometheus.write.v2.Request` message, this option is noop (always true). [ send_native_histograms: | default = false ] # Sets the `Authorization` header on every remote write request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] # Optional `Authorization` header configuration. authorization: # Sets the authentication type. - [ type: | default: Bearer ] + [ type: | default = Bearer ] # Sets the credentials. It is mutually exclusive with # `credentials_file`. [ credentials: ] @@ -3637,12 +4007,12 @@ sigv4: [ role_arn: ] # Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread. +# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam. oauth2: [ ] # Optional AzureAD configuration. -# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4. +# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam. azuread: # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. [ cloud: | default = AzurePublic ] @@ -3662,6 +4032,14 @@ azuread: [ sdk: [ tenant_id: ] ] +# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use. +# Optional Google Cloud Monitoring configuration. +# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread. +# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`. +google_iam: + # Service account key with monitoring write permissions. + credentials_file: + # Configures the remote write request's TLS settings. tls_config: [ ] @@ -3673,16 +4051,28 @@ tls_config: # contain port numbers. [ no_proxy: ] # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] +[ proxy_from_environment: | default = false ] # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default = true ] # Configures the queue used to write to remote storage. queue_config: @@ -3712,7 +4102,10 @@ queue_config: # which means that all samples are sent. [ sample_age_limit: | default = 0s ] -# Configures the sending of series metadata to remote storage. 
+# Configures the sending of series metadata to remote storage +# if the `prometheus.WriteRequest` message was chosen. When +# `io.prometheus.write.v2.Request` is used, metadata is always sent. +# # Metadata configuration is subject to change at any point # or be removed in future releases. metadata_config: @@ -3758,9 +4151,11 @@ headers: # Sets the `Authorization` header on every remote read request with the # configured username and password. +# username and username_file are mutually exclusive. # password and password_file are mutually exclusive. basic_auth: [ username: ] + [ username_file: ] [ password: ] [ password_file: ] @@ -3796,6 +4191,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md index 163fcb91f1..7fc676a251 100644 --- a/docs/configuration/unit_testing_rules.md +++ b/docs/configuration/unit_testing_rules.md @@ -92,7 +92,7 @@ series: # # Native histogram notation: # Native histograms can be used instead of floating point numbers using the following notation: -# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}} +# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}} # Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'. # All properties are optional and default to 0. The order is not important. The following properties are supported: # - schema (int): @@ -119,6 +119,8 @@ series: # Observation counts in negative buckets. Each represents an absolute count. # - n_offset (int): # The starting index of the first entry in the negative buckets. +# - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge') +# The counter reset hint associated with this histogram. Defaults to 'unknown' if not set. values: ``` diff --git a/docs/feature_flags.md b/docs/feature_flags.md index a5dc69a718..911dde20e4 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -20,14 +20,6 @@ values according to the values of the current environment variables. References to undefined variables are replaced by the empty string. The `$` character can be escaped by using `$$`. -## Remote Write Receiver - -`--enable-feature=remote-write-receiver` - -The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview). - -Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus. - ## Exemplars storage `--enable-feature=exemplar-storage` @@ -55,30 +47,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. 
Note that `scrape_sample_limit` can be zero if there is no limit configured,
which means that the query above can return `+Inf` for targets with no limit (as we divide by zero).
If you want to query only for targets that do have a sample limit use this query:
`scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`.

-## New service discovery manager
-
-`--enable-feature=new-service-discovery-manager`
-
-When enabled, Prometheus uses a new service discovery manager that does not
-restart unchanged discoveries upon reloading. This makes reloads faster and reduces
-pressure on service discoveries' sources.
-
-Users are encouraged to test the new service discovery manager and report any
-issues upstream.
-
-In future releases, this new service discovery manager will become the default and
-this feature flag will be ignored.
-
-## Prometheus agent
-
-`--enable-feature=agent`
-
-When enabled, Prometheus runs in agent mode. The agent mode is limited to
-discovery, scrape and remote write.
-
-This is useful when you do not need to query the Prometheus data locally, but
-only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
-
## Per-step stats

`--enable-feature=promql-per-step-stats`
@@ -193,15 +161,16 @@ This should **only** be applied to metrics that currently produce such labels.

`--enable-feature=otlp-write-receiver`

The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes.
-Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features
+Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features
won't work when you push OTLP metrics.

## Experimental PromQL functions

`--enable-feature=promql-experimental-functions`

-Enables PromQL functions that are considered experimental and whose name or
-semantics could change.
+Enables PromQL functions that are considered experimental. These functions
+might change their name, syntax, or semantics. They might also get removed
+entirely.

## Created Timestamps Zero Injection

@@ -224,3 +193,54 @@ When the `concurrent-rule-eval` feature flag is enabled, rules without any depen
This has the potential to improve rule group evaluation latency and resource utilization at the expense of adding more concurrent query load. The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default.
+
+## Serve old Prometheus UI
+
+`--enable-feature=old-ui`
+
+Fall back to serving the old (Prometheus 2.x) web UI instead of the new UI. The new UI that was released as part of Prometheus 3.0 is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. However, it is not fully feature complete and battle-tested yet, so some users may still prefer using the old UI.
+
+## Metadata WAL Records
+
+`--enable-feature=metadata-wal-records`
+
+When enabled, Prometheus will store metadata in-memory and keep track of
+metadata changes as WAL records on a per-series basis.
+
+This must be used if you are also using remote write 2.0, as remote write 2.0
+gathers metadata only from the WAL.
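+
+As a sketch (the receiver URL is illustrative, borrowed from the example write
+adapter documented further below), a Prometheus started with
+`--enable-feature=metadata-wal-records` could then be configured to send
+remote write 2.0 messages like this:
+
+```yaml
+remote_write:
+  - url: "http://localhost:1234/receive"
+    protobuf_message: "io.prometheus.write.v2.Request"
+```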
+
+## Delay compaction start time
+
+`--enable-feature=delayed-compaction`
+
+A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources.
+
+Only auto Head compactions and the operations directly resulting from them are subject to this delay.
+
+In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay.
+
+Note that during this delay, the Head continues its usual operations, which include serving and appending series.
+
+Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay were not in place.
+
+## Delay `__name__` label removal for PromQL engine
+
+`--enable-feature=promql-delayed-name-removal`
+
+When enabled, Prometheus will change the way in which the `__name__` label is removed from PromQL query results (for functions and expressions for which this is necessary). Specifically, it will delay the removal to the last step of the query evaluation, instead of every time an expression or function creating derived metrics is evaluated.
+
+This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.
+
+## Auto Reload Config
+
+`--enable-feature=auto-reload-config`
+
+When enabled, Prometheus will automatically reload its configuration file at a
+specified interval. The interval is defined by the
+`--config.auto-reload-interval` flag, which defaults to `30s`.
+
+Configuration reloads are triggered by detecting changes in the checksum of the
+main configuration file or any referenced files, such as rule and scrape
+configurations. To ensure consistency and avoid issues during reloads, it's
+recommended to update these files atomically.
diff --git a/docs/querying/api.md b/docs/querying/api.md
index 71e01b3b95..e32c8ecaf5 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -25,8 +25,10 @@ Other non-`2xx` codes may be returned for errors occurring before the API
endpoint is reached.

An array of warnings may be returned if there are errors that do
-not inhibit the request execution. All of the data that was successfully
-collected will be returned in the data field.
+not inhibit the request execution. An additional array of info-level
+annotations may be returned for potential query issues that may or may
+not be false positives. All of the data that was successfully collected
+will be returned in the data field.

The JSON response envelope format is as follows:

@@ -40,9 +42,11 @@ The JSON response envelope format is as follows:
  "errorType": "<string>",
  "error": "<string>",

-  // Only if there were warnings while executing the request.
+  // Only set if there were warnings while executing the request.
  // There will still be data in the data field.
-  "warnings": ["<string>"]
+  "warnings": ["<string>"],
+  // Only set if there were info-level annotations while executing the request.
+  "infos": ["<string>"]
}
```

@@ -235,6 +239,75 @@ $ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
}
```

+## Parsing a PromQL expression into an abstract syntax tree (AST)
+
+This endpoint is **experimental** and might change in the future.
It is currently only meant to be used by Prometheus' own web UI, and the endpoint name and exact format returned may change from one Prometheus version to another. It may also be removed if it is no longer needed by the UI.
+
+The following endpoint parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation:
+
+```
+GET /api/v1/parse_query
+POST /api/v1/parse_query
+```
+
+URL query parameters:
+
+- `query=<string>`: Prometheus expression query string.
+
+You can URL-encode these parameters directly in the request body by using the `POST` method and
+`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
+query that may breach server-side URL character limits.
+
+The `data` section of the query result is a JSON object representing the AST of the parsed query expression.
+
+The following example parses the expression `foo/bar`:
+
+```json
+$ curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar'
+{
+  "data" : {
+    "bool" : false,
+    "lhs" : {
+      "matchers" : [
+        {
+          "name" : "__name__",
+          "type" : "=",
+          "value" : "foo"
+        }
+      ],
+      "name" : "foo",
+      "offset" : 0,
+      "startOrEnd" : null,
+      "timestamp" : null,
+      "type" : "vectorSelector"
+    },
+    "matching" : {
+      "card" : "one-to-one",
+      "include" : [],
+      "labels" : [],
+      "on" : false
+    },
+    "op" : "/",
+    "rhs" : {
+      "matchers" : [
+        {
+          "name" : "__name__",
+          "type" : "=",
+          "value" : "bar"
+        }
+      ],
+      "name" : "bar",
+      "offset" : 0,
+      "startOrEnd" : null,
+      "timestamp" : null,
+      "type" : "vectorSelector"
+    },
+    "type" : "binaryExpr"
+  },
+  "status" : "success"
+}
+```
+
## Querying metadata

Prometheus offers a set of API endpoints to query metadata about series and their labels.
@@ -256,7 +329,7 @@ URL query parameters:
series to return. At least one `match[]` argument must be provided.
- `start=<rfc3339 | unix_timestamp>`: Start timestamp.
- `end=<rfc3339 | unix_timestamp>`: End timestamp.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
@@ -307,7 +380,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the series
  from which to read the label names. Optional.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned label names. Optional. 0 means disabled.

The `data` section of the JSON response is a list of string label names.

@@ -358,7 +431,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the series
  from which to read the label values. Optional.
-- `limit=<number>`: Maximum number of returned label values. Optional.
+- `limit=<number>`: Maximum number of returned label values. Optional. 0 means disabled.

The `data` section of the JSON response is a list of string label values.

@@ -452,7 +525,7 @@ raw numbers.

The keys `"histogram"` and `"histograms"` only show up if the experimental
native histograms are present in the response. Their placeholder `<histogram>`
-is explained in detail in its own section below.
+is explained in detail in its own section below.

### Range vectors

@@ -470,7 +543,7 @@ Range vectors are returned as result type `matrix`. The corresponding
  ]
]
```

-Each series could have the `"values"` key, or the `"histograms"` key, or both.
+Each series could have the `"values"` key, or the `"histograms"` key, or both.
+For a given timestamp, there will only be one sample of either float or histogram type.

Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort)
@@ -690,6 +763,7 @@ URL query parameters:
- `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
+- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.

```json
$ curl http://localhost:9090/api/v1/rules
@@ -1309,7 +1383,7 @@ endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview).

## OTLP Receiver

-Prometheus can be configured as a receiver for the OTLP Metrics protocol. This
+Prometheus can be configured as a receiver for the OTLP Metrics protocol. This
is not considered an efficient way of ingesting samples. Use it with caution
for specific low-volume use cases. It is not suitable for replacing the
ingestion via scraping.
diff --git a/docs/querying/basics.md b/docs/querying/basics.md
index fee7e63c42..4ea186beeb 100644
--- a/docs/querying/basics.md
+++ b/docs/querying/basics.md
@@ -8,9 +8,15 @@ sort_rank: 1

Prometheus provides a functional query language called PromQL (Prometheus Query
Language) that lets the user select and aggregate time series data in real
-time. The result of an expression can either be shown as a graph, viewed as
-tabular data in Prometheus's expression browser, or consumed by external
-systems via the [HTTP API](api.md).
+time.
+
+When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
+or a _range query_, evaluated at equally-spaced steps between a start and an end time. PromQL works exactly the same
+in both cases; the range query is just like an instant query run multiple times at different timestamps.
+
+In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
+
+Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).

## Examples

@@ -35,7 +41,7 @@ vector is the only type which can be graphed.

_Notes about the experimental native histograms:_

* Ingesting native histograms has to be enabled via a [feature
-  flag](../../feature_flags.md#native-histograms).
+  flag](../feature_flags.md#native-histograms).
* Once native histograms have been ingested into the TSDB (and even after
  disabling the feature flag again), both instant vectors and range vectors may
  now contain samples that aren't simple floating point numbers (float samples)
@@ -81,12 +87,20 @@ Examples:

    0x8f
    -Inf
    NaN
+
+
+As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change.
+
+Examples:
+
+    1s # Equivalent to 1.0
+    2m # Equivalent to 120.0
+    1ms # Equivalent to 0.001
+
## Time series selectors

-Time series selectors are responsible for selecting the times series and raw or inferred sample timestamps and values.
-
-Time series *selectors* are not to be confused with higher level concept of instant and range *queries* that can execute the time series *selectors*. A higher level instant query would evaluate the given selector at one point in time, however the range query would evaluate the selector at multiple different times in between a minimum and maximum timestamp at regular steps.
+These are the basic building blocks that instruct PromQL what data to fetch.

### Instant vector selectors

@@ -189,12 +203,12 @@ Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a
[time duration](#time-durations) is appended in square brackets (`[]`) at the end of a
vector selector to specify how far back in time values should be fetched for
-each resulting range vector element. The range is a closed interval,
-i.e. samples with timestamps coinciding with either boundary of the range are
-still included in the selection.
+each resulting range vector element. The range is a left-open and right-closed interval,
+i.e. samples with timestamps coinciding with the left boundary of the range are excluded from the selection,
+while samples coinciding with the right boundary of the range are included in the selection.

-In this example, we select all the values we have recorded within the last 5
-minutes for all time series that have the metric name `http_requests_total` and
+In this example, we select all the values recorded less than 5m ago for all time series
+that have the metric name `http_requests_total` and
a `job` label set to `prometheus`:

    http_requests_total{job="prometheus"}[5m]

@@ -224,6 +238,15 @@ Here are some examples of valid time durations:

    5m
    10s

+
+As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change.
+
+Examples:
+
+    1.0 # Equivalent to 1s
+    0.001 # Equivalent to 1ms
+    120 # Equivalent to 2m
+
### Offset modifier

The `offset` modifier allows changing the time offset for individual
@@ -335,7 +358,7 @@ independently of the actual present time series data. This is mainly to support
cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
time series do not precisely align in time. Because of their independence,
Prometheus needs to assign a value at those timestamps for each relevant time
-series. It does so by taking the newest sample before this timestamp within the lookback period.
+series. It does so by taking the newest sample that is less than the lookback period ago.

The lookback period is 5 minutes by default.

If a target scrape or rule evaluation no longer returns a sample for a time
diff --git a/docs/querying/examples.md b/docs/querying/examples.md
index 957c2f5f98..8287ff6f62 100644
--- a/docs/querying/examples.md
+++ b/docs/querying/examples.md
@@ -95,3 +95,13 @@ Assuming this metric contains one time series per running instance, you could
count the number of running instances per application like this:

    count by (app) (instance_cpu_time_ns)
+
+If we are exploring some metrics for their labels, e.g. to
be able to aggregate
+over some of them, we could use the following:
+
+    limitk(10, app_foo_metric_bar)
+
+Alternatively, if we wanted the returned timeseries to be more evenly sampled,
+we could use the following to get approximately 10% of them:
+
+    limit_ratio(0.1, app_foo_metric_bar)
diff --git a/docs/querying/functions.md b/docs/querying/functions.md
index 9a552f697a..b6f2181803 100644
--- a/docs/querying/functions.md
+++ b/docs/querying/functions.md
@@ -98,8 +98,9 @@ vector.
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.

Special cases:
-- Return an empty vector if `min > max`
-- Return `NaN` if `min` or `max` is `NaN`
+
+* Return an empty vector if `min > max`
+* Return `NaN` if `min` or `max` is `NaN`

## `clamp_max()`

@@ -325,45 +326,70 @@ With native histograms, aggregating everything works as usual without any `by` clause:

    histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m])))

-The `histogram_quantile()` function interpolates quantile values by
-assuming a linear distribution within a bucket.
+In the (common) case that a quantile value does not coincide with a bucket
+boundary, the `histogram_quantile()` function interpolates the quantile value
+within the bucket it falls into. For classic histograms, for
+native histograms with custom bucket boundaries, and for the zero bucket of
+other native histograms, it assumes a uniform distribution of observations
+within the bucket (also called _linear interpolation_). For the
+non-zero buckets of native histograms with a standard exponential bucketing
+schema, the interpolation is done under the assumption that the samples within
+the bucket are distributed in a way that they would uniformly populate the
+buckets in a hypothetical histogram with higher resolution. (This is also
+called _exponential interpolation_.)

If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is
returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.

-The following is only relevant for classic histograms: If `b` contains
-fewer than two buckets, `NaN` is returned. The highest bucket must have an
-upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located
-in the highest bucket, the upper bound of the second highest bucket is
-returned. A lower limit of the lowest bucket is assumed to be 0 if the upper
-bound of that bucket is greater than
-0. In that case, the usual linear interpolation is applied within that
-bucket. Otherwise, the upper bound of the lowest bucket is returned for
-quantiles located in the lowest bucket.
+Special cases for classic histograms:

-You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in
-a histogram.
+* If `b` contains fewer than two buckets, `NaN` is returned.
+* The highest bucket must have an upper bound of `+Inf`. (Otherwise, `NaN` is
+  returned.)
+* If a quantile is located in the highest bucket, the upper bound of the second
+  highest bucket is returned.
+* The lower limit of the lowest bucket is assumed to be 0 if the upper bound of
+  that bucket is greater than 0. In that case, the usual linear interpolation
+  is applied within that bucket. Otherwise, the upper bound of the lowest
+  bucket is returned for quantiles located in the lowest bucket.

-You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in
-a histogram.
+Special cases for native histograms (relevant for the exact interpolation
+happening within the zero bucket):

-Buckets of classic histograms are cumulative. Therefore, the following should always be the case:
+* A zero bucket with finite width is assumed to contain no negative
+  observations if the histogram has observations in positive buckets, but none
+  in negative buckets.
+* A zero bucket with finite width is assumed to contain no positive
+  observations if the histogram has observations in negative buckets, but none
+  in positive buckets.

-- The counts in the buckets are monotonically increasing (strictly non-decreasing).
-- A lack of observations between the upper limits of two consecutive buckets results in equal counts
-in those two buckets.
+You can use `histogram_quantile(0, v instant-vector)` to get the estimated
+minimum value stored in a histogram.

-However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets
-with `sum(rate(...))`) or invalid data might violate these assumptions. In that case,
-`histogram_quantile` would be unable to return meaningful results. To mitigate the issue,
-`histogram_quantile` assumes that tiny relative differences between consecutive buckets are happening
-because of floating point precision errors and ignores them. (The threshold to ignore a difference
-between two buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are
-non-monotonic bucket counts even after this adjustment, they are increased to the value of the
-previous buckets to enforce monotonicity. The latter is evidence for an actual issue with the input
-data and is therefore flagged with an informational annotation reading `input to histogram_quantile
-needed to be fixed for monotonicity`. If you encounter this annotation, you should find and remove
-the source of the invalid data.
+You can use `histogram_quantile(1, v instant-vector)` to get the estimated
+maximum value stored in a histogram.
+
+Buckets of classic histograms are cumulative. Therefore, the following should
+always be the case:
+
+* The counts in the buckets are monotonically increasing (strictly
+  non-decreasing).
+* A lack of observations between the upper limits of two consecutive buckets
+  results in equal counts in those two buckets.
+
+However, floating point precision issues (e.g. small discrepancies introduced
+by the computation of buckets with `sum(rate(...))`) or invalid data might violate
+these assumptions. In that case, `histogram_quantile` would be unable to return
+meaningful results. To mitigate the issue, `histogram_quantile` assumes that
+tiny relative differences between consecutive buckets are happening because of
+floating point precision errors and ignores them. (The threshold to ignore a
+difference between two buckets is a trillionth (1e-12) of the sum of both
+buckets.) Furthermore, if there are non-monotonic bucket counts even after this
+adjustment, they are increased to the value of the previous buckets to enforce
+monotonicity. The latter is evidence for an actual issue with the input data
+and is therefore flagged with an informational annotation reading `input to
+histogram_quantile needed to be fixed for monotonicity`. If you encounter this
+annotation, you should find and remove the source of the invalid data.

## `histogram_stddev()` and `histogram_stdvar()`

@@ -379,15 +405,22 @@ do not show up in the returned vector.
Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard
variance of observations in a native histogram.

-## `holt_winters()`
+## `double_exponential_smoothing()`

-`holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value
+**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
+
+`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a smoothed value
for time series based on the range in `v`. The lower the smoothing factor `sf`,
the more importance is given to old data. The higher the trend factor `tf`, the
more trends in the data are considered. Both `sf` and `tf` must be between 0 and 1.
+For additional details, refer to [NIST Engineering Statistics Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm).
+In Prometheus V2 this function was called `holt_winters`. This caused confusion
+since the Holt-Winters method usually refers to triple exponential smoothing.
+Double exponential smoothing as implemented here is also referred to as "Holt
+Linear".

-`holt_winters` should only be used with gauges.
+`double_exponential_smoothing` should only be used with gauges.

## `hour()`

@@ -616,9 +649,9 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q

## `sort_by_label()`

-**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**

-`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order.
+`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets.

Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering.

@@ -626,7 +659,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so

## `sort_by_label_desc()`

-**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**

Same as `sort_by_label`, but sorts in descending order.

@@ -675,7 +708,7 @@ over time and return an instant vector with per-series aggregation results:
* `last_over_time(range-vector)`: the most recent point value in the specified interval.
* `present_over_time(range-vector)`: the value 1 for any series in the specified interval.

-If the [feature flag](../feature_flags/)
+If the [feature flag](../feature_flags.md#experimental-promql-functions)
`--enable-feature=promql-experimental-functions` is set, the following
additional functions are available:

@@ -692,21 +725,21 @@ ignore histogram samples.

The trigonometric functions work in radians:

-- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
-- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)). -- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)). -- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)). -- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)). -- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)). -- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)). -- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)). -- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)). -- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)). -- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)). -- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)). +* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)). +* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)). +* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)). +* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)). +* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)). +* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)). +* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)). +* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)). +* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)). +* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)). +* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)). +* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)). The following are useful for converting between degrees and radians: -- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`. -- `pi()`: returns pi. -- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`. +* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`. +* `pi()`: returns pi. +* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`. 
diff --git a/docs/querying/operators.md b/docs/querying/operators.md
index b92bdd94ac..f5f217ff63 100644
--- a/docs/querying/operators.md
+++ b/docs/querying/operators.md
@@ -230,6 +230,8 @@ vector of fewer elements with aggregated values:
* `bottomk` (smallest k elements by sample value)
* `topk` (largest k elements by sample value)
* `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
+* `limitk` (sample k elements)
+* `limit_ratio` (sample elements with approximately ratio `𝑟` if `𝑟 > 0`; for `𝑟 < 0`, return the complement of the elements that `limit_ratio(1.0 + 𝑟, ...)` would sample)

These operators can either be used to aggregate over **all** label dimensions
or preserve distinct dimensions by including a `without` or `by` clause. These
@@ -249,8 +251,8 @@ all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.

-`parameter` is only required for `count_values`, `quantile`, `topk` and
-`bottomk`.
+`parameter` is only required for `count_values`, `quantile`, `topk`,
+`bottomk`, `limitk` and `limit_ratio`.

`count_values` outputs one time series per unique sample value. Each series has
an additional label. The name of that label is given by the aggregation
@@ -261,11 +263,16 @@ time series is the number of times that sample value was present.
the input samples, including the original labels, are returned in the result
vector. `by` and `without` are only used to bucket the input vector.

+`limitk` and `limit_ratio` also return a subset of the input samples,
+including the original labels in the result vector. These are experimental
+operators that must be enabled with `--enable-feature=promql-experimental-functions`.
+
`quantile` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over. φ is provided as the
aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned.
For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.
+

Example:

If the metric `http_requests_total` had time series that fan out by
@@ -291,6 +298,33 @@ To get the 5 largest HTTP requests counts across all instances we could write:

    topk(5, http_requests_total)

+To sample 10 timeseries, for example to inspect labels and their values, we
+could write:
+
+    limitk(10, http_requests_total)
+
+To deterministically sample approximately 10% of timeseries we could write:
+
+    limit_ratio(0.1, http_requests_total)
+
+Given that `limit_ratio()` implements a deterministic sampling algorithm (based
+on labels' hash), you can get the _complement_ of the above samples, i.e.
+approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)`
+with:
+
+    limit_ratio(-0.9, http_requests_total)
+
+You can also use this feature to e.g. verify that `avg()` is a representative
+aggregation for your samples' values, by checking that the difference between
+averaging two subsets of the samples is "small" when compared to the standard
+deviation.
+ + abs( + avg(limit_ratio(0.5, http_requests_total)) + - + avg(limit_ratio(-0.5, http_requests_total)) + ) <= bool stddev(http_requests_total) + ## Binary operator precedence The following list shows the precedence of binary operators in Prometheus, from diff --git a/docs/storage.md b/docs/storage.md index 947960fe12..2142c970ff 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -87,10 +87,9 @@ or 31 days, whichever is smaller. Prometheus has several flags that configure local storage. The most important are: - `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. -- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is - set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention` - nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`. - Supported units: y, w, d, h, m, s, ms. +- `--storage.tsdb.retention.time`: How long to retain samples in storage. If neither + this flag nor `storage.tsdb.retention.size` is set, the retention time defaults to + `15d`. Supported units: y, w, d, h, m, s, ms. - `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only @@ -98,7 +97,6 @@ Prometheus has several flags that configure local storage. The most important ar chunks are counted in the total size. So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours). -- `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`. - `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. @@ -137,6 +135,18 @@ will be used. Expired block cleanup happens in the background. It may take up to two hours to remove expired blocks. Blocks must be fully expired before they are removed. +## Right-Sizing Retention Size + +If you are utilizing `storage.tsdb.retention.size` to set a size limit, you +will want to consider the right size for this value relative to the storage you +have allocated for Prometheus. It is wise to reduce the retention size to provide +a buffer, ensuring that older entries will be removed before the allocated storage +for Prometheus becomes full. + +At present, we recommend setting the retention size to, at most, 80-85% of your +allocated Prometheus disk space. This increases the likelihood that older entries +will be removed prior to hitting any disk limitations. + ## Remote storage integrations Prometheus's local storage is limited to a single node's scalability and durability. @@ -145,31 +155,27 @@ a set of interfaces that allow integrating with remote storage systems. ### Overview -Prometheus integrates with remote storage systems in three ways: +Prometheus integrates with remote storage systems in four ways: -- Prometheus can write samples that it ingests to a remote URL in a standardized format. -- Prometheus can receive samples from other Prometheus servers in a standardized format. -- Prometheus can read (back) sample data from a remote URL in a standardized format. 
+- Prometheus can write samples that it ingests to a remote URL in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+- Prometheus can receive samples from other clients in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+- Prometheus can read (back) sample data from a remote URL in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31).
+- Prometheus can return sample data requested by clients in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31).

![Remote read and write architecture](images/remote_integrations.png)

-The read and write protocols both use a snappy-compressed protocol buffer encoding over
-HTTP. The protocols are not considered as stable APIs yet and may change to use gRPC
-over HTTP/2 in the future, when all hops between Prometheus and the remote storage can
-safely be assumed to support HTTP/2.
+The remote read and write protocols both use a snappy-compressed protocol buffer encoding over
+HTTP. The read protocol is not yet considered a stable API.

-For details on configuring remote storage integrations in Prometheus, see the
+The write protocol has a [stable specification for the 1.0 version](https://prometheus.io/docs/specs/remote_write_spec/)
+and an [experimental specification for the 2.0 version](https://prometheus.io/docs/specs/remote_write_spec_2_0/),
+both supported by the Prometheus server.
+
+For details on configuring remote storage integrations in Prometheus as a client, see the
[remote write](configuration/configuration.md#remote_write) and
[remote read](configuration/configuration.md#remote_read) sections of the Prometheus
configuration documentation.

-The built-in remote write receiver can be enabled by setting the
-`--web.enable-remote-write-receiver` command line flag. When enabled,
-the remote write receiver endpoint is `/api/v1/write`.
-
-For details on the request and response messages, see the
-[remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto).
-
Note that on the read path, Prometheus only fetches raw series data for a set of
label selectors and time ranges from the remote end. All PromQL evaluation on the
raw data still happens in Prometheus itself. This means that remote read queries
@@ -177,6 +183,11 @@ have some scalability limit, since all necessary data needs to be loaded into the
querying Prometheus server first and then processed there. However, supporting
fully distributed evaluation of PromQL was deemed infeasible for the time being.

+Prometheus also serves both protocols. The built-in remote write receiver can be enabled
+by setting the `--web.enable-remote-write-receiver` command line flag. When enabled,
+the remote write receiver endpoint is `/api/v1/write`. The remote read endpoint is
+available on [`/api/v1/read`](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/).
+
### Existing integrations

To learn more about existing integrations with remote storage systems, see the
diff --git a/documentation/examples/prometheus-ovhcloud.yml b/documentation/examples/prometheus-ovhcloud.yml
index 21facad1ca..b2cc60af25 100644
--- a/documentation/examples/prometheus-ovhcloud.yml
+++ b/documentation/examples/prometheus-ovhcloud.yml
@@ -1,4 +1,4 @@
-# An example scrape configuration for running Prometheus with Ovhcloud.
+# An example scrape configuration for running Prometheus with OVHcloud.
scrape_configs:
  - job_name: 'ovhcloud'
    ovhcloud_sd_configs:
diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md
index 9748c448db..968d2b25cb 100644
--- a/documentation/examples/remote_storage/example_write_adapter/README.md
+++ b/documentation/examples/remote_storage/example_write_adapter/README.md
@@ -7,6 +7,7 @@ To use it:

```
go build
+
./example_write_adapter
```

@@ -15,10 +16,19 @@ go build
```yaml
remote_write:
  - url: "http://localhost:1234/receive"
+    protobuf_message: "io.prometheus.write.v2.Request"
```

-Then start Prometheus:
+or for the eventually deprecated Remote Write 1.0 message:
+
+```yaml
+remote_write:
+  - url: "http://localhost:1234/receive"
+    protobuf_message: "prometheus.WriteRequest"
+```
+
+Then start Prometheus (in a separate terminal):

```
-./prometheus
+./prometheus --enable-feature=metadata-wal-records
```
diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go
index 48c0a9571f..727a3056d3 100644
--- a/documentation/examples/remote_storage/example_write_adapter/server.go
+++ b/documentation/examples/remote_storage/example_write_adapter/server.go
@@ -18,44 +18,103 @@ import (
	"log"
	"net/http"

-	"github.com/prometheus/common/model"
-
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
-		req, err := remote.DecodeWriteRequest(r.Body)
-		if err != nil {
-			http.Error(w, err.Error(), http.StatusBadRequest)
+		enc := r.Header.Get("Content-Encoding")
+		if enc == "" {
+			http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
+			return
+		}
+		if enc != "snappy" {
+			http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
			return
		}

-		for _, ts := range req.Timeseries {
-			m := make(model.Metric, len(ts.Labels))
-			for _, l := range ts.Labels {
-				m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-			}
-			fmt.Println(m)
+		contentType := r.Header.Get("Content-Type")
+		if contentType == "" {
+			http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
+			return
+		}

-			for _, s := range ts.Samples {
-				fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
-			}
+		defer func() { _ = r.Body.Close() }()

-			for _, e := range ts.Exemplars {
-				m := make(model.Metric, len(e.Labels))
-				for _, l := range e.Labels {
-					m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-				}
-				fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
+		// Very simplistic content parsing, see
+		// storage/remote/write_handler.go#WriteHandler.ServeHTTP for production example.
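+		// NOTE: a sketch of the content negotiation described by the remote write
+		// 2.0 spec: an absent proto= parameter in the Content-Type defaults to the
+		// v1 prometheus.WriteRequest message, while proto=io.prometheus.write.v2.Request
+		// selects the v2 message decoded below.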
+ switch contentType { + case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest": + req, err := remote.DecodeWriteRequest(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return } - - for _, hp := range ts.Histograms { - h := remote.HistogramProtoToHistogram(hp) - fmt.Printf("\tHistogram: %s\n", h.String()) + printV1(req) + case "application/x-protobuf;proto=io.prometheus.write.v2.Request": + req, err := remote.DecodeWriteV2Request(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return } + printV2(req) + default: + msg := fmt.Sprintf("Unknown remote write content type: %s", contentType) + fmt.Println(msg) + http.Error(w, msg, http.StatusBadRequest) } }) - log.Fatal(http.ListenAndServe(":1234", nil)) } + +func printV1(req *prompb.WriteRequest) { + b := labels.NewScratchBuilder(0) + for _, ts := range req.Timeseries { + fmt.Println(ts.ToLabels(&b, nil)) + + for _, s := range ts.Samples { + fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) + } + for _, ep := range ts.Exemplars { + e := ep.ToExemplar(&b, nil) + fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp) + } + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + h := hp.ToFloatHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + continue + } + h := hp.ToIntHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + } + } +} + +func printV2(req *writev2.Request) { + b := labels.NewScratchBuilder(0) + for _, ts := range req.Timeseries { + l := ts.ToLabels(&b, req.Symbols) + m := ts.ToMetadata(req.Symbols) + fmt.Println(l, m) + + for _, s := range ts.Samples { + fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) + } + for _, ep := range ts.Exemplars { + e := ep.ToExemplar(&b, req.Symbols) + fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp) + } + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + h := hp.ToFloatHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + continue + } + h := hp.ToIntHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + } + } +} diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index d4f19749dc..8ed5084d91 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,26 +1,26 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.21 +go 1.22.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.5 - github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.55.0 - github.com/prometheus/prometheus v0.52.1 + github.com/influxdata/influxdb v1.11.6 + github.com/prometheus/client_golang v1.20.2 + github.com/prometheus/common v0.57.0 + github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 ) require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aws/aws-sdk-go v1.51.25 // indirect 
+ github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -31,13 +31,11 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect - github.com/hashicorp/go-version v1.6.0 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -49,23 +47,22 @@ require ( github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/collector/featuregate v1.5.0 // indirect - go.opentelemetry.io/collector/pdata v1.5.0 // indirect - go.opentelemetry.io/collector/semconv v0.98.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect - go.opentelemetry.io/otel v1.25.0 // indirect - go.opentelemetry.io/otel/metric v1.25.0 // indirect - go.opentelemetry.io/otel/trace v1.25.0 // indirect + go.opentelemetry.io/collector/pdata v1.8.0 // indirect + go.opentelemetry.io/collector/semconv v0.101.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect + go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect - google.golang.org/grpc v1.63.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -82,4 +79,10 @@ exclude ( cloud.google.com/go v0.34.0 cloud.google.com/go v0.65.0 cloud.google.com/go v0.82.0 + + // Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules. + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 ) + +// TODO(bwplotka): Move to main branch commit or perhaps released version. 
+replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index ec04347111..1abeff7eb1 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,11 +1,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= @@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls= -github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -37,23 +37,20 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r 
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
-github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
+github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
+github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
-github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -68,8 +65,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -95,8 +92,8 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
 github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
 github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
 github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
-github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
-github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
 github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -135,44 +132,42 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
-github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
+github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
-github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
-github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
+github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
+github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
 github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
 github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
-github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
 github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
-github.com/influxdata/influxdb v1.11.5/go.mod h1:k8sWREQl1/9t46VrkrH5adUM4UNGIt206ipO3plbkw8=
+github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU=
+github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg=
 github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
 github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -192,8 +187,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -208,14 +203,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
-github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
+github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
+github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
 github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
@@ -243,8 +238,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
 github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
-github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
+github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
+github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -258,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
+github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -269,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
+github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -279,12 +274,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4=
-github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
+github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -306,20 +301,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
 github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
-go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
-go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
-go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
-go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
-go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
-go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
-go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
-go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
-go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
-go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
-go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
+go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
+go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
+go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
+go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -330,14 +323,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -351,8 +344,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
@@ -380,11 +373,11 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -397,21 +390,20 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
-google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/go.mod b/go.mod
index ce2f0714a0..c3f6bbe749 100644
--- a/go.mod
+++ b/go.mod
@@ -1,103 +1,104 @@
 module github.com/prometheus/prometheus
 
-go 1.21
+go 1.22.0
+
+toolchain go1.23.0
 
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
 	github.com/Code-Hex/go-generics-cache v1.5.1
 	github.com/KimMachineGun/automemlimit v0.6.1
 	github.com/alecthomas/kingpin/v2 v2.4.0
-	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
-	github.com/aws/aws-sdk-go v1.53.16
+	github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
+	github.com/aws/aws-sdk-go v1.55.5
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/dennwc/varint v1.0.0
-	github.com/digitalocean/godo v1.117.0
-	github.com/docker/docker v26.1.3+incompatible
+	github.com/digitalocean/godo v1.122.0
+	github.com/docker/docker v27.2.0+incompatible
 	github.com/edsrzf/mmap-go v1.1.0
-	github.com/envoyproxy/go-control-plane v0.12.0
-	github.com/envoyproxy/protoc-gen-validate v1.0.4
+	github.com/envoyproxy/go-control-plane v0.13.0
+	github.com/envoyproxy/protoc-gen-validate v1.1.0
 	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
 	github.com/fsnotify/fsnotify v1.7.0
 	github.com/go-kit/log v0.2.1
 	github.com/go-logfmt/logfmt v0.6.0
 	github.com/go-openapi/strfmt v0.23.0
-	github.com/go-zookeeper/zk v1.0.3
+	github.com/go-zookeeper/zk v1.0.4
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/google/go-cmp v0.6.0
-	github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
+	github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
 	github.com/google/uuid v1.6.0
-	github.com/gophercloud/gophercloud v1.12.0
+	github.com/gophercloud/gophercloud v1.14.0
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.29.1
-	github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
-	github.com/hetznercloud/hcloud-go/v2 v2.9.0
-	github.com/ionos-cloud/sdk-go/v6 v6.1.11
+	github.com/hashicorp/consul/api v1.29.4
+	github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
+	github.com/hetznercloud/hcloud-go/v2 v2.13.1
+	github.com/ionos-cloud/sdk-go/v6 v6.2.1
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.17.8
+	github.com/klauspost/compress v1.17.9
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-	github.com/linode/linodego v1.35.0
-	github.com/miekg/dns v1.1.59
+	github.com/linode/linodego v1.40.0
+	github.com/miekg/dns v1.1.62
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
 	github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
 	github.com/oklog/run v1.1.0
 	github.com/oklog/ulid v1.3.1
-	github.com/ovh/go-ovh v1.5.1
+	github.com/ovh/go-ovh v1.6.0
 	github.com/prometheus/alertmanager v0.27.0
-	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/client_golang v1.20.4
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.54.0
+	github.com/prometheus/common v0.59.1
 	github.com/prometheus/common/assets v0.2.0
 	github.com/prometheus/common/sigv4 v0.1.0
-	github.com/prometheus/exporter-toolkit v0.11.0
-	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
+	github.com/prometheus/exporter-toolkit v0.12.0
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
 	github.com/stretchr/testify v1.9.0
 	github.com/vultr/govultr/v2 v2.17.2
-	go.opentelemetry.io/collector/pdata v1.8.0
-	go.opentelemetry.io/collector/semconv v0.101.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
-	go.opentelemetry.io/otel v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
-	go.opentelemetry.io/otel/sdk v1.27.0
-	go.opentelemetry.io/otel/trace v1.27.0
+	go.opentelemetry.io/collector/pdata v1.14.1
+	go.opentelemetry.io/collector/semconv v0.108.1
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
+	go.opentelemetry.io/otel v1.29.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0
+	go.opentelemetry.io/otel/sdk v1.29.0
+	go.opentelemetry.io/otel/trace v1.29.0
 	go.uber.org/atomic v1.11.0
 	go.uber.org/automaxprocs v1.5.3
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
-	golang.org/x/net v0.26.0
-	golang.org/x/oauth2 v0.21.0
-	golang.org/x/sync v0.7.0
-	golang.org/x/sys v0.21.0
-	golang.org/x/text v0.16.0
-	golang.org/x/time v0.5.0
-	golang.org/x/tools v0.22.0
-	google.golang.org/api v0.183.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
-	google.golang.org/grpc v1.64.0
-	google.golang.org/protobuf v1.34.1
+	golang.org/x/oauth2 v0.23.0
+	golang.org/x/sync v0.8.0
+	golang.org/x/sys v0.25.0
+	golang.org/x/text v0.18.0
+	golang.org/x/time v0.6.0
+	golang.org/x/tools v0.24.0
+	google.golang.org/api v0.195.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed
+	google.golang.org/grpc v1.66.0
+	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.29.3
-	k8s.io/apimachinery v0.29.3
-	k8s.io/client-go v0.29.3
+	k8s.io/api v0.31.0
+	k8s.io/apimachinery v0.31.0
+	k8s.io/client-go v0.31.0
 	k8s.io/klog v1.0.0
-	k8s.io/klog/v2 v2.120.1
+	k8s.io/klog/v2 v2.130.1
 )
 
 require (
-	cloud.google.com/go/auth v0.5.1 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
-	cloud.google.com/go/compute/metadata v0.3.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
+	cloud.google.com/go/auth v0.9.3 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
+	cloud.google.com/go/compute/metadata v0.5.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
@@ -105,7 +106,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cilium/ebpf v0.11.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
+	github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -114,12 +115,12 @@ require (
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/fatih/color v1.16.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-kit/kit v0.12.0 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.22.2 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
@@ -132,17 +133,17 @@ require (
 	github.com/go-resty/resty/v2 v2.13.1 // indirect
 	github.com/godbus/dbus/v5 v5.0.4 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/golang/glog v1.2.0 // indirect
+	github.com/golang/glog v1.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/s2a-go v0.1.7 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
+	github.com/google/s2a-go v0.1.8 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
+	github.com/googleapis/gax-go/v2 v2.13.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -162,6 +163,8 @@ require (
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mdlayher/socket v0.4.1 // indirect
+	github.com/mdlayher/vsock v1.2.1 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
@@ -175,34 +178,38 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.27.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
-	golang.org/x/crypto v0.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.29.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	golang.org/x/crypto v0.26.0 // indirect
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.18.0 // indirect
-	golang.org/x/term v0.21.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+	golang.org/x/mod v0.20.0 // indirect
+	golang.org/x/net v0.28.0 // indirect
+	golang.org/x/term v0.23.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )
 
 replace (
 	k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0
-	k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0
+	k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.5.0
 )
 
 // Exclude linodego v1.0.0 as it is no longer published on github.
diff --git a/go.sum b/go.sum
index 956b9d8949..6f31bec93f 100644
--- a/go.sum
+++ b/go.sum
@@ -12,18 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
-cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
-cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
+cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
+cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
+cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -36,12 +36,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@@ -75,8 +75,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
+github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
-github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
-github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg=
+github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4=
+github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -168,13 +168,11 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
+github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
-github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
-github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -191,6 +189,8 @@ github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -210,8 +210,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
@@ -236,10 +236,10 @@ github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz
 github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
-github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
-github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
+github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
@@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
+github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -319,23 +319,23 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
-github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
-github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
-github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
-github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
+github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
+github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -350,13 +350,13 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
-github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
-github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
-github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
+github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
+github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
+github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
+github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
 github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
 github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -409,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
 github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
-github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
-github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
-github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
+github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
+github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
 github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
 github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -454,8 +454,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= -github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= +github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= +github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -497,11 +497,15 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= -github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod 
h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -553,11 +557,11 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -573,8 +577,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= -github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= +github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -592,6 +596,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -608,8 +614,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -625,22 +631,22 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= -github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= +github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo= +github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod 
h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -650,8 +656,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= @@ -661,8 +667,8 @@ github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k= -github.com/simonpasquier/klog-gokit/v3 v3.3.0 h1:HMzH999kO5gEgJTaWWO+xjncW5oycspcsBnjn9b853Q= -github.com/simonpasquier/klog-gokit/v3 v3.3.0/go.mod h1:uSbnWC3T7kt1dQyY9sjv0Ao1SehMAJdVnUNSKhjaDsg= +github.com/simonpasquier/klog-gokit/v3 v3.5.0 h1:ewnk+ickph0hkQFgdI4pffKIbruAxxWcg0Fe/vQmLOM= +github.com/simonpasquier/klog-gokit/v3 v3.5.0/go.mod h1:S9flvRzzpaYLYtXI2w8jf9R/IU/Cy14NrbvDUevNP1E= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -694,6 +700,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -702,6 +709,8 @@ 
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -723,28 +732,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= -go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= -go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= -go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= +go.opentelemetry.io/collector/pdata v1.14.1/go.mod 
h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= +go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= +go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -773,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -809,8 +818,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -856,16 +865,16 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -879,8 +888,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -946,16 +955,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -967,14 +976,15 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1025,8 +1035,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1046,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= -google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1084,10 +1094,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1106,8 +1116,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1119,8 +1129,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1129,6 +1139,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1162,16 +1174,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= 
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1180,6 +1192,6 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h6 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go index 08f55374ef..2c28b17257 100644 --- a/model/exemplar/exemplar.go +++ b/model/exemplar/exemplar.go @@ -15,7 +15,9 @@ package exemplar import "github.com/prometheus/prometheus/model/labels" -// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters +// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of +// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 +// UTF-8 characters." 
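// For illustration (an editor's example, not from the spec text): an exemplar
// labeled {trace_id="abc123"} consumes 8 + 6 = 14 of the 128 allowed characters.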
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars const ExemplarMaxLabelSetLength = 128 @@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool { return e.Value == e2.Value } -// Sort first by timestamp, then value, then labels. +// Compare first timestamps, then values, then labels. func Compare(a, b Exemplar) int { if a.Ts < b.Ts { return -1 diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2a37ea66d4..b0deb6cc4d 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -230,6 +230,17 @@ func (h *FloatHistogram) TestExpression() string { res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) } + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. + case CounterReset: + res = append(res, "counter_reset_hint:reset") + case NotCounterReset: + res = append(res, "counter_reset_hint:not_reset") + case GaugeType: + res = append(res, "counter_reset_hint:gauge") + } + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) @@ -342,7 +353,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { default: // All other cases shouldn't actually happen. // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. h.CounterResetHint = UnknownCounterReset // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } @@ -658,7 +669,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the - // current one does not have. Unlass all + // current one does not have. Unless all // remaining buckets in the previous histogram // are unpopulated, this is a reset. for { @@ -891,7 +902,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() { // reconcileZeroBuckets finds a zero bucket large enough to include the zero // buckets of both histograms (the receiving histogram and the other histogram) // with a zero threshold that is not within a populated bucket in either -// histogram. This method modifies the receiving histogram accourdingly, but +// histogram. This method modifies the receiving histogram accordingly, but // leaves the other histogram as is. Instead, it returns the zero count the // other histogram would have if it were modified. 
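// For example (an illustration of the behavior described above, assuming both
// thresholds end up outside populated buckets): if h.ZeroThreshold is 0.1 and
// other.ZeroThreshold is 0.5, h's zero bucket is widened to 0.5, populated
// buckets of h that now fall below the new threshold are folded into
// h.ZeroCount, and the returned value is the zero count other would have
// after the same widening.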
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 1558a6d679..cf370a313e 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -131,6 +131,54 @@ func TestFloatHistogramMul(t *testing.T) { NegativeBuckets: []float64{9, 3, 15, 18}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -11, + Count: -30, + Sum: -23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-1, 0, -3, -4, -7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3, -1, -5, -6}, + }, + }, + { + "negative multiplier", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -22, + Count: -60, + Sum: -46, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-2, 0, -6, -8, -14}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-6, -2, -10, -12}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ @@ -409,6 +457,54 @@ func TestFloatHistogramDiv(t *testing.T) { NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -3493.3, + Sum: -2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{-1, -3.3, -4.2, -0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3.1, -3, -1.234e5, -1000}, + }, + }, + { + "negative half", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -15, + Sum: -11.5, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-0.5, 0, -1.5, -2, -3.5}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-1.5, -0.5, -2.5, -3}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ diff --git a/model/labels/labels.go b/model/labels/labels.go index 01514abf38..f4de7496ce 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -38,10 +38,10 @@ func (ls Labels) Bytes(buf []byte) []byte { b.WriteByte(labelSep) for i, l := range ls { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(l.Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(l.Value) } return b.Bytes() @@ -86,9 +86,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) 
- b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } @@ -106,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { i++ default: b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) i++ j++ } @@ -130,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -151,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { i++ default: if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) i++ j++ @@ -177,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { continue } if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) } return b.Bytes() @@ -315,7 +315,8 @@ func Compare(a, b Labels) int { return len(a) - len(b) } -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +// CopyFrom copies labels from b on top of whatever was in ls previously, +// reusing memory or expanding if needed. func (ls *Labels) CopyFrom(b Labels) { (*ls) = append((*ls)[:0], b...) } @@ -422,7 +423,7 @@ type ScratchBuilder struct { add Labels } -// Symbol-table is no-op, just for api parity with dedupelabels. +// SymbolTable is no-op, just for api parity with dedupelabels. type SymbolTable struct{} func NewSymbolTable() *SymbolTable { return nil } @@ -458,7 +459,7 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } -// Add a name/value pair, using []byte instead of string. +// UnsafeAddBytes adds a name/value pair, using []byte instead of string. // The '-tags stringlabels' version of this function is unsafe, hence the name. // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { @@ -475,14 +476,14 @@ func (b *ScratchBuilder) Assign(ls Labels) { b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. } -// Return the name/value pairs added so far as a Labels object. +// Labels returns the name/value pairs added so far as a Labels object. // Note: if you want them sorted, call Sort() first. func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } -// Write the newly-built Labels out to ls. +// Overwrite the newly-built Labels out to ls. // Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { *ls = append((*ls)[:0], b.add...) diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index 4bc94f84fe..99529a3836 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -29,10 +29,11 @@ const ( BucketLabel = "le" InstanceName = "instance" - labelSep = '\xfe' + labelSep = '\xfe' // Used at beginning of `Bytes` return. + sep = '\xff' // Used between labels in `Bytes` and `Hash`. 
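	// (Assumed rationale, for illustration: the bytes 0xFE and 0xFF can never
	// occur in valid UTF-8, so neither sentinel can collide with label names
	// or values.)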
) -var seps = []byte{'\xff'} +var seps = []byte{sep} // Used with Hash, which has no WriteByte method. // Label is a key/value pair of strings. type Label struct { @@ -94,12 +95,23 @@ func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { } // IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { +func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool { err := ls.Validate(func(l Label) error { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return strconv.ErrSyntax + if l.Name == model.MetricNameLabel { + // If the default validation scheme has been overridden with legacy mode, + // we need to call the special legacy validation checker. + if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) { + return strconv.ErrSyntax + } + if !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation { + if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + } else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { return strconv.ErrSyntax } return nil @@ -218,5 +230,5 @@ func contains(s []Label, n string) bool { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index 0e5bb048be..da8a88cc15 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) for i := 0; i < len(ls.data); { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } var name, value string name, i = decodeString(ls.syms, ls.data, i) value, i = decodeString(ls.syms, ls.data, i) b.WriteString(name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(value) } return b.Bytes() @@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) pos = newPos } return xxhash.Sum64(b) @@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) 
- b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { } if lName == names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos @@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { } if j == len(names) || lName != names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index bccceb61fe..c64bb990e0 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -16,7 +16,6 @@ package labels import ( - "reflect" "slices" "strings" "unsafe" @@ -112,9 +111,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -138,9 +137,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -299,10 +298,8 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. @@ -338,8 +335,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. 
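	// (A sketch of the idea: load 8-byte words via unsafe pointers and compare
	// them until one differs, then fall back to byte-wise comparison to
	// pinpoint the first differing byte.)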
- sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index d8910cdc85..9208908311 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -21,6 +21,7 @@ import ( "strings" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) @@ -272,11 +273,86 @@ func TestLabels_IsValid(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - require.Equal(t, test.expected, test.input.IsValid()) + require.Equal(t, test.expected, test.input.IsValid(model.LegacyValidation)) }) } } +func TestLabels_ValidationModes(t *testing.T) { + for _, test := range []struct { + input Labels + globalMode model.ValidationScheme + callMode model.ValidationScheme + expected bool + }{ + { + input: FromStrings( + "__name__", "test.metric", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.UTF8Validation, + expected: true, + }, + { + input: FromStrings( + "__name__", "test", + "\xc5 bad utf8", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.UTF8Validation, + expected: false, + }, + { + // Setting the common model to legacy validation and then trying to check for UTF-8 on a + // per-call basis is not supported. + input: FromStrings( + "__name__", "test.utf8.metric", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.LegacyValidation, + callMode: model.UTF8Validation, + expected: false, + }, + { + input: FromStrings( + "__name__", "test", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.LegacyValidation, + callMode: model.LegacyValidation, + expected: true, + }, + { + input: FromStrings( + "__name__", "test.utf8.metric", + "hostname", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.LegacyValidation, + expected: false, + }, + { + input: FromStrings( + "__name__", "test", + "host.name", "localhost", + "job", "check", + ), + globalMode: model.UTF8Validation, + callMode: model.LegacyValidation, + expected: false, + }, + } { + model.NameValidationScheme = test.globalMode + require.Equal(t, test.expected, test.input.IsValid(test.callMode)) + } +} + func TestLabels_Equal(t *testing.T) { labels := FromStrings( "aaa", "111", diff --git a/model/labels/regexp.go b/model/labels/regexp.go index 767bd6942f..3df9435194 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -28,7 +28,7 @@ const ( maxSetMatches = 256 // The minimum number of alternate values a regex should have to trigger - // the optimization done by optimizeEqualStringMatchers() and so use a map + // the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map // to match values instead of iterating over a list. This value has // been computed running BenchmarkOptimizeEqualStringMatchers. minEqualMultiStringMatcherMapThreshold = 16 @@ -63,13 +63,13 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { // available, even if the string matcher is faster. 
m.matchString = m.stringMatcher.Matches } else { - parsed, err := syntax.Parse(v, syntax.Perl) + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) if err != nil { return nil, err } // Simplify the syntax tree to run faster. parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") if err != nil { return nil, err } @@ -337,7 +337,7 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) { return nil, nil } - multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates) + multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0) for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') { // Split the string into the next literal and the remainder @@ -412,7 +412,7 @@ func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher { clearBeginEndText(re) m := stringMatcherFromRegexpInternal(re) - m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold) + m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold) return m } @@ -732,17 +732,20 @@ func (m *equalStringMatcher) Matches(s string) bool { type multiStringMatcherBuilder interface { StringMatcher add(s string) + addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) setMatches() []string } -func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder { +func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder { // If the estimated size is low enough, it's faster to use a slice instead of a map. - if estimatedSize < minEqualMultiStringMatcherMapThreshold { + if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 { return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)} } return &equalMultiStringMapMatcher{ values: make(map[string]struct{}, estimatedSize), + prefixes: make(map[string][]StringMatcher, estimatedPrefixes), + minPrefixLen: minPrefixLength, caseSensitive: caseSensitive, } } @@ -758,6 +761,10 @@ func (m *equalMultiStringSliceMatcher) add(s string) { m.values = append(m.values, s) } +func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) { + panic("not implemented") +} + func (m *equalMultiStringSliceMatcher) setMatches() []string { return m.values } @@ -779,12 +786,17 @@ func (m *equalMultiStringSliceMatcher) Matches(s string) bool { return false } -// equalMultiStringMapMatcher matches a string exactly against a map of valid values. +// equalMultiStringMapMatcher matches a string exactly against a map of valid values +// or against a set of prefix matchers. type equalMultiStringMapMatcher struct { // values contains values to match a string against. If the matching is case insensitive, // the values here must be lowercase. values map[string]struct{} - + // prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string. + // If the matching is case insensitive, prefixes are all lowercase. + prefixes map[string][]StringMatcher + // minPrefixLen can be zero, meaning there are no prefix matchers. 
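The parser now receives syntax.Perl|syntax.DotNL and the compiled fallback is anchored as ^(?s:...)$ instead of ^(?:...)$, so . matches newlines and FastRegexMatcher gains fully-anchored, dot-matches-newline semantics. A minimal stdlib-only illustration of the behavioural difference:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	pattern := "line1.*line2"
	value := "line1\nline2"

	// Old anchoring: . does not cross newlines.
	anchored := regexp.MustCompile("^(?:" + pattern + ")$")
	// New anchoring with the s flag (mirroring syntax.DotNL at parse time):
	// . matches any character, including \n.
	dotAll := regexp.MustCompile("^(?s:" + pattern + ")$")

	fmt.Println(anchored.MatchString(value)) // false
	fmt.Println(dotAll.MatchString(value))   // true
}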
+ minPrefixLen int caseSensitive bool } @@ -796,8 +808,27 @@ func (m *equalMultiStringMapMatcher) add(s string) { m.values[s] = struct{}{} } +func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) { + if m.minPrefixLen == 0 { + panic("addPrefix called when no prefix length defined") + } + if len(prefix) < m.minPrefixLen { + panic("addPrefix called with a too short prefix") + } + if m.caseSensitive != prefixCaseSensitive { + panic("addPrefix called with a prefix whose case sensitivity is different than the expected one") + } + + s := prefix[:m.minPrefixLen] + if !m.caseSensitive { + s = strings.ToLower(s) + } + + m.prefixes[s] = append(m.prefixes[s], matcher) +} + func (m *equalMultiStringMapMatcher) setMatches() []string { - if len(m.values) >= maxSetMatches { + if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 { return nil } @@ -813,8 +844,17 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool { s = toNormalisedLower(s) } - _, ok := m.values[s] - return ok + if _, ok := m.values[s]; ok { + return true + } + if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen { + for _, matcher := range m.prefixes[s[:m.minPrefixLen]] { + if matcher.Matches(s) { + return true + } + } + } + return false } // toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert @@ -897,20 +937,24 @@ func (m trueMatcher) Matches(_ string) bool { return true } -// optimizeEqualStringMatchers optimize a specific case where all matchers are made by an -// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In -// this specific case, when we have many strings to match against we can use a map instead +// optimizeEqualOrPrefixStringMatchers optimize a specific case where all matchers are made by an +// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or +// with a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher). +// +// In this specific case, when we have many strings to match against we can use a map instead // of iterating over the list of strings. -func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher { +func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher { var ( caseSensitive bool caseSensitiveSet bool numValues int + numPrefixes int + minPrefixLength int ) // Analyse the input StringMatcher to count the number of occurrences // and ensure all of them have the same case sensitivity. - analyseCallback := func(matcher *equalStringMatcher) bool { + analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool { // Ensure we don't have mixed case sensitivity. if caseSensitiveSet && caseSensitive != matcher.caseSensitive { return false @@ -923,34 +967,55 @@ func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatch return true } - if !findEqualStringMatchers(input, analyseCallback) { + analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + // Ensure we don't have mixed case sensitivity. 
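The prefixes map shown above is keyed by the first minPrefixLen bytes of each registered prefix, so the extended Matches costs one exact-map lookup plus a scan of at most one small bucket, instead of trying every alternation in turn. A simplified standalone model of that lookup structure (a toy type illustrating the idea, not the unexported Prometheus one):

package main

import (
	"fmt"
	"strings"
)

// prefixBucketMatcher matches s if it equals a known value exactly or starts
// with a registered prefix. Buckets are keyed by the first minPrefixLen bytes
// so a lookup inspects only one small bucket.
type prefixBucketMatcher struct {
	values       map[string]struct{}
	prefixes     map[string][]string // bucket key -> full prefixes
	minPrefixLen int
}

func (m *prefixBucketMatcher) matches(s string) bool {
	if _, ok := m.values[s]; ok {
		return true
	}
	if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
		for _, p := range m.prefixes[s[:m.minPrefixLen]] {
			if strings.HasPrefix(s, p) {
				return true
			}
		}
	}
	return false
}

func main() {
	m := &prefixBucketMatcher{
		values:       map[string]struct{}{"xfs": {}},
		prefixes:     map[string][]string{"e": {"ext4-"}},
		minPrefixLen: 1,
	}
	fmt.Println(m.matches("xfs"), m.matches("ext4-journal"), m.matches("btrfs")) // true true false
}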
+ if caseSensitiveSet && caseSensitive != prefixCaseSensitive { + return false + } else if !caseSensitiveSet { + caseSensitive = prefixCaseSensitive + caseSensitiveSet = true + } + if numPrefixes == 0 || len(prefix) < minPrefixLength { + minPrefixLength = len(prefix) + } + + numPrefixes++ + return true + } + + if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) { return input } - // If the number of values found is less than the threshold, then we should skip the optimization. - if numValues < threshold { + // If the number of values and prefixes found is less than the threshold, then we should skip the optimization. + if (numValues + numPrefixes) < threshold { return input } // Parse again the input StringMatcher to extract all values and storing them. // We can skip the case sensitivity check because we've already checked it and // if the code reach this point then it means all matchers have the same case sensitivity. - multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues) + multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength) // Ignore the return value because we already iterated over the input StringMatcher // and it was all good. - findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool { + findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { multiMatcher.add(matcher.s) return true + }, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + multiMatcher.addPrefix(prefix, caseSensitive, matcher) + return true }) return multiMatcher } -// findEqualStringMatchers analyze the input StringMatcher and calls the callback for each -// equalStringMatcher found. Returns true if and only if the input StringMatcher is *only* -// composed by an alternation of equalStringMatcher. -func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool { +// findEqualOrPrefixStringMatchers analyzes the input StringMatcher and calls the equalMatcherCallback for each +// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found. +// +// Returns true if and only if the input StringMatcher is *only* composed of an alternation of equalStringMatcher and/or +// literal prefix matchers. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
+func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool { orInput, ok := input.(orStringMatcher) if !ok { return false @@ -959,17 +1024,27 @@ func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalSt for _, m := range orInput { switch casted := m.(type) { case orStringMatcher: - if !findEqualStringMatchers(m, callback) { + if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) { return false } case *equalStringMatcher: - if !callback(casted) { + if !equalMatcherCallback(casted) { + return false + } + + case *literalPrefixSensitiveStringMatcher: + if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) { + return false + } + + case *literalPrefixInsensitiveStringMatcher: + if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) { return false } default: - // It's not an equal string matcher, so we have to stop searching + // It's not an equal or prefix string matcher, so we have to stop searching // cause this optimization can't be applied. return false } diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index fa5c96f420..8df0dbb023 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -71,6 +71,8 @@ var ( // A long case insensitive alternation. "(?i:(zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb))", "(?i:(AAAAAAAAAAAAAAAAAAAAAAAA|BBBBBBBBBBBBBBBBBBBBBBBB|cccccccccccccccccccccccC|ſſſſſſſſſſſſſſſſſſſſſſſſS|SSSSSSSSSSSSSSSSSSSSSSSSſ))", + // A short case insensitive alternation where each entry ends with ".*". + "(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*))", // A long case insensitive alternation where each entry ends with ".*". 
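From a caller's perspective none of this restructuring is visible: NewFastRegexMatcher still just answers MatchString, and optimizeEqualOrPrefixStringMatchers only rewrites the tree once the combined number of equality and prefix alternates reaches minEqualMultiStringMatcherMapThreshold (16, per the constant above); match results are identical either way, only the lookup cost changes. A usage sketch with an alternation mixing exact literals and prefix entries:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Two exact literals plus two literal-prefix alternates: exactly the
	// shape the equal-or-prefix analysis above recognises.
	m, err := labels.NewFastRegexMatcher("gateway|ingress|cortex-gw.*|cortex-gw-internal.*")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.MatchString("gateway"))             // true, exact literal
	fmt.Println(m.MatchString("cortex-gw-internal1")) // true, prefix alternate
	fmt.Println(m.MatchString("cortex"))              // false
}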
"(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*|qbvKMimS.*|IecrXtPa.*|seTckYqt.*|NxnyHkgB.*|fIDlOgKb.*|UhlWIygH.*|OtNoJxHG.*|cUTkFVIV.*|mTgFIHjr.*|jQkoIDtE.*|PPMKxRXl.*|AwMfwVkQ.*|CQyMrTQJ.*|BzrqxVSi.*|nTpcWuhF.*|PertdywG.*|ZZDgCtXN.*|WWdDPyyE.*|uVtNQsKk.*|BdeCHvPZ.*|wshRnFlH.*|aOUIitIp.*|RxZeCdXT.*|CFZMslCj.*|AVBZRDxl.*|IzIGCnhw.*|ythYuWiz.*|oztXVXhl.*|VbLkwqQx.*|qvaUgyVC.*|VawUjPWC.*|ecloYJuj.*|boCLTdSU.*|uPrKeAZx.*|hrMWLWBq.*|JOnUNHRM.*|rYnujkPq.*|dDEdZhIj.*|DRrfvugG.*|yEGfDxVV.*|YMYdJWuP.*|PHUQZNWM.*|AmKNrLis.*|zTxndVfn.*|FPsHoJnc.*|EIulZTua.*|KlAPhdzg.*|ScHJJCLt.*|NtTfMzME.*|eMCwuFdo.*|SEpJVJbR.*|cdhXZeCx.*|sAVtBwRh.*|kVFEVcMI.*|jzJrxraA.*|tGLHTell.*|NNWoeSaw.*|DcOKSetX.*|UXZAJyka.*|THpMphDP.*|rizheevl.*|kDCBRidd.*|pCZZRqyu.*|pSygkitl.*|SwZGkAaW.*|wILOrfNX.*|QkwVOerj.*|kHOMxPDr.*|EwOVycJv.*|AJvtzQFS.*|yEOjKYYB.*|LizIINLL.*|JBRSsfcG.*|YPiUqqNl.*|IsdEbvee.*|MjEpGcBm.*|OxXZVgEQ.*|xClXGuxa.*|UzRCGFEb.*|buJbvfvA.*|IPZQxRet.*|oFYShsMc.*|oBHffuHO.*|bzzKrcBR.*|KAjzrGCl.*|IPUsAVls.*|OGMUMbIU.*|gyDccHuR.*|bjlalnDd.*|ZLWjeMna.*|fdsuIlxQ.*|dVXtiomV.*|XxedTjNg.*|XWMHlNoA.*|nnyqArQX.*|opfkWGhb.*|wYtnhdYb.*))", // A long case insensitive alternation where each entry starts with ".*". @@ -119,7 +121,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) { t.Parallel() m, err := NewFastRegexMatcher(r) require.NoError(t, err) - re := regexp.MustCompile("^(?:" + r + ")$") + re := regexp.MustCompile("^(?s:" + r + ")$") require.Equal(t, re.MatchString(v), m.MatchString(v)) }) } @@ -165,7 +167,7 @@ func TestOptimizeConcatRegex(t *testing.T) { } for _, c := range cases { - parsed, err := syntax.Parse(c.regex, syntax.Perl) + parsed, err := syntax.Parse(c.regex, syntax.Perl|syntax.DotNL) require.NoError(t, err) prefix, suffix, contains := optimizeConcatRegex(parsed) @@ -246,7 +248,7 @@ func TestFindSetMatches(t *testing.T) { c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches, actualCaseSensitive := findSetMatches(parsed) require.Equal(t, c.expMatches, matches) @@ -346,15 +348,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { pattern string exp StringMatcher }{ - {".*", anyStringWithoutNewlineMatcher{}}, - {".*?", anyStringWithoutNewlineMatcher{}}, + {".*", trueMatcher{}}, + {".*?", trueMatcher{}}, {"(?s:.*)", trueMatcher{}}, - {"(.*)", anyStringWithoutNewlineMatcher{}}, - {"^.*$", anyStringWithoutNewlineMatcher{}}, - {".+", &anyNonEmptyStringMatcher{matchNL: false}}, + {"(.*)", trueMatcher{}}, + {"^.*$", trueMatcher{}}, + {".+", &anyNonEmptyStringMatcher{matchNL: true}}, {"(?s:.+)", &anyNonEmptyStringMatcher{matchNL: true}}, - {"^.+$", &anyNonEmptyStringMatcher{matchNL: false}}, - {"(.+)", &anyNonEmptyStringMatcher{matchNL: false}}, + {"^.+$", &anyNonEmptyStringMatcher{matchNL: true}}, + {"(.+)", &anyNonEmptyStringMatcher{matchNL: true}}, {"", emptyStringMatcher{}}, {"^$", emptyStringMatcher{}}, {"^foo$", &equalStringMatcher{s: "foo", caseSensitive: true}}, @@ -364,23 +366,23 @@ func TestStringMatcherFromRegexp(t *testing.T) { {`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, {"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, 
&equalStringMatcher{s: "bar", caseSensitive: true}})}, {"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, - {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: anyStringWithoutNewlineMatcher{}}}, - {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, + {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}}, + {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, {"10\\.0\\.(1|2)\\.+", nil}, - {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}}, - {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}}, - {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", 
"api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, - {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})}, - {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: nil}}, + {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{matchNL: true}, suffix: "foo", suffixCaseSensitive: true}}, + {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: trueMatcher{}}}, + {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, + {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: true}}})}, + {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: nil}}, // we don't support case insensitive matching for contains. // This is because there's no strings.IndexOfFold function. // We can revisit later if this is really popular by using strings.ToUpper. @@ -391,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {".*foo.*bar.*", nil}, {`\d*`, nil}, {".", nil}, - {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}}, + {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}}, // This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat. // It would make the code too complex to handle it. {"(.+)/(foo.*|bar$)", nil}, // Case sensitive alternate with same literal prefix and .* suffix. 
- {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: trueMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: trueMatcher{}}}}}, // Case insensitive alternate with same literal prefix and .* suffix. - {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, - {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, + {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, // Concatenated variable length selectors are not supported. {"foo.*.*", nil}, {"foo.+.+", nil}, @@ -408,15 +410,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"aaa.?.?", nil}, {"aaa.?.*", nil}, // Regexps with ".?". 
- {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, + {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}}, {"f.?o", nil}, } { c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches := stringMatcherFromRegexp(parsed) require.Equal(t, c.exp, matches) @@ -435,16 +437,16 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { { pattern: "(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. { pattern: "(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX"}, - expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp"}, }, // Nested literal prefixes, case sensitive. @@ -472,13 +474,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher. 
numPrefixMatchers := 0 @@ -521,16 +523,16 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { { pattern: "(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. { pattern: "(?i)(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Nested literal suffixes, case sensitive. @@ -550,13 +552,13 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalSuffixStringMatcher. numSuffixMatchers := 0 @@ -596,26 +598,26 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "test.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "test!"}, - expectedNotMatches: []string{"test\n", "tes", "test!!"}, + expectedMatches: []string{"test\n", "test", "test!"}, + expectedNotMatches: []string{"tes", "test!!"}, }, { pattern: ".?test", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "!test"}, - expectedNotMatches: []string{"\ntest", "tes", "test!"}, + expectedMatches: []string{"\ntest", "test", "!test"}, + expectedNotMatches: []string{"tes", "test!"}, }, { pattern: "(aaa.?|bbb.?)", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX"}, - expectedNotMatches: []string{"aa", "aaaXX", "aaa\n", "bb", "bbbXX", "bbb\n"}, + expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX", "aaa\n", "bbb\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "bb", "bbbXX"}, }, { pattern: ".*aaa.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX"}, - expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX", "XXXaaa\n"}, + expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX", "XXXaaa\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX"}, }, // Match newline. 
@@ -630,18 +632,18 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "(aaa.?|((?s).?bbb.+))", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX"}, - expectedNotMatches: []string{"aa", "aaa\n", "Xbbb", "\nbbb"}, + expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX", "aaa\n"}, + expectedNotMatches: []string{"aa", "Xbbb", "\nbbb"}, }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains zeroOrOneCharacterStringMatcher. numZeroOrOneMatchers := 0 @@ -686,7 +688,15 @@ func randStrings(randGenerator *rand.Rand, many, length int) []string { return out } -func TestOptimizeEqualStringMatchers(t *testing.T) { +func randStringsWithSuffix(randGenerator *rand.Rand, many, length int, suffix string) []string { + out := randStrings(randGenerator, many, length) + for i := range out { + out[i] += suffix + } + return out +} + +func TestOptimizeEqualOrPrefixStringMatchers(t *testing.T) { tests := map[string]struct { input StringMatcher expectedValues []string @@ -767,7 +777,7 @@ func TestOptimizeEqualStringMatchers(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - actualMatcher := optimizeEqualStringMatchers(testData.input, 0) + actualMatcher := optimizeEqualOrPrefixStringMatchers(testData.input, 0) if testData.expectedValues == nil { require.IsType(t, testData.input, actualMatcher) @@ -782,10 +792,12 @@ func TestOptimizeEqualStringMatchers(t *testing.T) { func TestNewEqualMultiStringMatcher(t *testing.T) { tests := map[string]struct { - values []string - caseSensitive bool - expectedValuesMap map[string]struct{} - expectedValuesList []string + values []string + caseSensitivePrefixes []*literalPrefixSensitiveStringMatcher + caseSensitive bool + expectedValuesMap map[string]struct{} + expectedPrefixesMap map[string][]StringMatcher + expectedValuesList []string }{ "few case sensitive values": { values: []string{"a", "B"}, @@ -797,27 +809,47 @@ func TestNewEqualMultiStringMatcher(t *testing.T) { caseSensitive: false, expectedValuesList: []string{"a", "B"}, }, + "few case sensitive values and prefixes": { + values: []string{"a"}, + caseSensitivePrefixes: []*literalPrefixSensitiveStringMatcher{{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}, + caseSensitive: true, + expectedValuesMap: map[string]struct{}{"a": {}}, + expectedPrefixesMap: map[string][]StringMatcher{"B": {&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}}, + }, "many case sensitive values": { - values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, - caseSensitive: true, - expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}}, + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, + caseSensitive: true, + expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}}, + 
expectedPrefixesMap: map[string][]StringMatcher{}, }, "many case insensitive values": { - values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, - caseSensitive: false, - expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}}, + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, + caseSensitive: false, + expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}}, + expectedPrefixesMap: map[string][]StringMatcher{}, }, } for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values)) + // To keep this test simple, we always assume a min prefix length of 1. + minPrefixLength := 0 + if len(testData.caseSensitivePrefixes) > 0 { + minPrefixLength = 1 + } + + matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.caseSensitivePrefixes), minPrefixLength) for _, v := range testData.values { matcher.add(v) } - if testData.expectedValuesMap != nil { + for _, p := range testData.caseSensitivePrefixes { + matcher.addPrefix(p.prefix, true, p) + } + + if testData.expectedValuesMap != nil || testData.expectedPrefixesMap != nil { require.IsType(t, &equalMultiStringMapMatcher{}, matcher) require.Equal(t, testData.expectedValuesMap, matcher.(*equalMultiStringMapMatcher).values) + require.Equal(t, testData.expectedPrefixesMap, matcher.(*equalMultiStringMapMatcher).prefixes) require.Equal(t, testData.caseSensitive, matcher.(*equalMultiStringMapMatcher).caseSensitive) } if testData.expectedValuesList != nil { @@ -829,9 +861,32 @@ func TestNewEqualMultiStringMatcher(t *testing.T) { } } +func TestEqualMultiStringMapMatcher_addPrefix(t *testing.T) { + t.Run("should panic if the matcher is case sensitive but the prefix is not case sensitive", func(t *testing.T) { + matcher := newEqualMultiStringMatcher(true, 0, 1, 1) + + require.Panics(t, func() { + matcher.addPrefix("a", false, &literalPrefixInsensitiveStringMatcher{ + prefix: "a", + }) + }) + }) + + t.Run("should panic if the matcher is not case sensitive but the prefix is case sensitive", func(t *testing.T) { + matcher := newEqualMultiStringMatcher(false, 0, 1, 1) + + require.Panics(t, func() { + matcher.addPrefix("a", true, &literalPrefixSensitiveStringMatcher{ + prefix: "a", + }) + }) + }) +} + func TestEqualMultiStringMatcher_Matches(t *testing.T) { tests := map[string]struct { values []string + prefixes []StringMatcher caseSensitive bool expectedMatches []string expectedNotMatches []string @@ -848,6 +903,24 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) { expectedMatches: []string{"a", "A", "b", "B"}, expectedNotMatches: []string{"c", "C"}, }, + "few case sensitive prefixes": { + prefixes: []StringMatcher{ + &literalPrefixSensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}}, + &literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}, + }, + caseSensitive: true, + expectedMatches: []string{"a", "aX", "B", "BX"}, + expectedNotMatches: []string{"A", "b"}, + }, + "few case insensitive prefixes": { + prefixes: []StringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}}, 
+ &literalPrefixInsensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}, + }, + caseSensitive: false, + expectedMatches: []string{"a", "aX", "A", "AX", "b", "bX", "B", "BX"}, + expectedNotMatches: []string{"c", "cX", "C", "CX"}, + }, "many case sensitive values": { values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, caseSensitive: true, @@ -860,14 +933,37 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) { expectedMatches: []string{"a", "A", "b", "B"}, expectedNotMatches: []string{"x", "X"}, }, + "mixed values and prefixes": { + values: []string{"a"}, + prefixes: []StringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}, + caseSensitive: true, + expectedMatches: []string{"a", "B", "BX"}, + expectedNotMatches: []string{"aX", "A", "b", "bX"}, + }, } for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values)) + // To keep this test simple, we always assume a min prefix length of 1. + minPrefixLength := 0 + if len(testData.prefixes) > 0 { + minPrefixLength = 1 + } + + matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.prefixes), minPrefixLength) for _, v := range testData.values { matcher.add(v) } + for _, p := range testData.prefixes { + switch m := p.(type) { + case *literalPrefixSensitiveStringMatcher: + matcher.addPrefix(m.prefix, true, p) + case *literalPrefixInsensitiveStringMatcher: + matcher.addPrefix(m.prefix, false, p) + default: + panic("Unexpected type in test case") + } + } for _, v := range testData.expectedMatches { require.True(t, matcher.Matches(v), "value: %s", v) @@ -879,29 +975,33 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) { } } -func TestFindEqualStringMatchers(t *testing.T) { +func TestFindEqualOrPrefixStringMatchers(t *testing.T) { type match struct { s string caseSensitive bool } - // Utility to call findEqualStringMatchers() and collect all callback invocations. - findEqualStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) { - ok = findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool { + // Utility to call findEqualOrPrefixStringMatchers() and collect all callback invocations. 
+ findEqualOrPrefixStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) { + ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { matches = append(matches, match{matcher.s, matcher.caseSensitive}) return true + }, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool { + matches = append(matches, match{prefix, prefixCaseSensitive}) + return true }) + return } t.Run("empty matcher", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(emptyStringMatcher{}) + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(emptyStringMatcher{}) require.False(t, actualOk) require.Empty(t, actualMatches) }) t.Run("concat of literal matchers (case sensitive)", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches( + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( orStringMatcher{ &equalStringMatcher{s: "test-1", caseSensitive: true}, &equalStringMatcher{s: "test-2", caseSensitive: true}, @@ -913,7 +1013,7 @@ func TestFindEqualStringMatchers(t *testing.T) { }) t.Run("concat of literal matchers (case insensitive)", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches( + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( orStringMatcher{ &equalStringMatcher{s: "test-1", caseSensitive: false}, &equalStringMatcher{s: "test-2", caseSensitive: false}, @@ -925,7 +1025,7 @@ func TestFindEqualStringMatchers(t *testing.T) { }) t.Run("concat of literal matchers (mixed case)", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches( + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( orStringMatcher{ &equalStringMatcher{s: "test-1", caseSensitive: false}, &equalStringMatcher{s: "test-2", caseSensitive: true}, @@ -935,11 +1035,59 @@ func TestFindEqualStringMatchers(t *testing.T) { require.True(t, actualOk) require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches) }) + + t.Run("concat of literal prefix matchers (case sensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixSensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal prefix matchers (case insensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixInsensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches) + }) + + t.Run("concat of literal prefix matchers (mixed case)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal string and prefix matchers (case sensitive)", func(t *testing.T) { + actualMatches, actualOk := 
findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &equalStringMatcher{s: "test-1", caseSensitive: true}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches) + }) } // This benchmark is used to find a good threshold to use to apply the optimization -// done by optimizeEqualStringMatchers(). -func BenchmarkOptimizeEqualStringMatchers(b *testing.B) { +// done by optimizeEqualOrPrefixStringMatchers(). +func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) { randGenerator := rand.New(rand.NewSource(time.Now().UnixNano())) // Generate variable lengths random texts to match against. @@ -949,42 +1097,51 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) { for numAlternations := 2; numAlternations <= 256; numAlternations *= 2 { for _, caseSensitive := range []bool{true, false} { - b.Run(fmt.Sprintf("alternations: %d case sensitive: %t", numAlternations, caseSensitive), func(b *testing.B) { - // Generate a regex with the expected number of alternations. - re := strings.Join(randStrings(randGenerator, numAlternations, 10), "|") - if !caseSensitive { - re = "(?i:(" + re + "))" - } - - parsed, err := syntax.Parse(re, syntax.Perl) - require.NoError(b, err) - - unoptimized := stringMatcherFromRegexpInternal(parsed) - require.IsType(b, orStringMatcher{}, unoptimized) - - optimized := optimizeEqualStringMatchers(unoptimized, 0) - if numAlternations < minEqualMultiStringMatcherMapThreshold { - require.IsType(b, &equalMultiStringSliceMatcher{}, optimized) - } else { - require.IsType(b, &equalMultiStringMapMatcher{}, optimized) - } - - b.Run("without optimizeEqualStringMatchers()", func(b *testing.B) { - for n := 0; n < b.N; n++ { - for _, t := range texts { - unoptimized.Matches(t) - } + for _, prefixMatcher := range []bool{true, false} { + b.Run(fmt.Sprintf("alternations: %d case sensitive: %t prefix matcher: %t", numAlternations, caseSensitive, prefixMatcher), func(b *testing.B) { + // If the test should run on prefix matchers, we add a wildcard matcher as suffix (prefix will be a literal). + suffix := "" + if prefixMatcher { + suffix = ".*" } - }) - b.Run("with optimizeEqualStringMatchers()", func(b *testing.B) { - for n := 0; n < b.N; n++ { - for _, t := range texts { - optimized.Matches(t) - } + // Generate a regex with the expected number of alternations. 
+ re := strings.Join(randStringsWithSuffix(randGenerator, numAlternations, 10, suffix), "|") + if !caseSensitive { + re = "(?i:(" + re + "))" } + b.Logf("regexp: %s", re) + + parsed, err := syntax.Parse(re, syntax.Perl|syntax.DotNL) + require.NoError(b, err) + + unoptimized := stringMatcherFromRegexpInternal(parsed) + require.IsType(b, orStringMatcher{}, unoptimized) + + optimized := optimizeEqualOrPrefixStringMatchers(unoptimized, 0) + if numAlternations < minEqualMultiStringMatcherMapThreshold && !prefixMatcher { + require.IsType(b, &equalMultiStringSliceMatcher{}, optimized) + } else { + require.IsType(b, &equalMultiStringMapMatcher{}, optimized) + } + + b.Run("without optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) { + for n := 0; n < b.N; n++ { + for _, t := range texts { + unoptimized.Matches(t) + } + } + }) + + b.Run("with optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) { + for n := 0; n < b.N; n++ { + for _, t := range texts { + optimized.Matches(t) + } + } + }) }) - }) + } } } } @@ -1204,10 +1361,16 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch } // No nested matchers for the following ones. + case *equalMultiStringMapMatcher: + for _, prefixes := range casted.prefixes { + for _, matcher := range prefixes { + visitStringMatcher(matcher, callback) + } + } + case emptyStringMatcher: case *equalStringMatcher: case *equalMultiStringSliceMatcher: - case *equalMultiStringMapMatcher: case anyStringWithoutNewlineMatcher: case *anyNonEmptyStringMatcher: case trueMatcher: diff --git a/model/labels/sharding.go b/model/labels/sharding.go index 5e3e89fbbb..8b3a369397 100644 --- a/model/labels/sharding.go +++ b/model/labels/sharding.go @@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } diff --git a/model/labels/sharding_dedupelabels.go b/model/labels/sharding_dedupelabels.go index 5912724f9b..5bf41b05d6 100644 --- a/model/labels/sharding_dedupelabels.go +++ b/model/labels/sharding_dedupelabels.go @@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) pos = newPos } return xxhash.Sum64(b) diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go index 3ad2027d8c..798f268eb9 100644 --- a/model/labels/sharding_stringlabels.go +++ b/model/labels/sharding_stringlabels.go @@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } if h != nil { return h.Sum64() diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 4f33edda43..eb79f7be21 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -171,7 +171,7 @@ type Regexp struct { // NewRegexp creates a new anchored Regexp and returns an error if the // passed-in regular expression does not compile. func NewRegexp(s string) (Regexp, error) { - regex, err := regexp.Compile("^(?:" + s + ")$") + regex, err := regexp.Compile("^(?s:" + s + ")$") return Regexp{Regexp: regex}, err } @@ -213,9 +213,13 @@ func (re Regexp) IsZero() bool { // String returns the original string used to compile the regular expression. 
func (re Regexp) String() string { + if re.Regexp == nil { + return "" + } + str := re.Regexp.String() - // Trim the anchor `^(?:` prefix and `)$` suffix. - return str[4 : len(str)-2] + // Trim the anchor `^(?s:` prefix and `)$` suffix. + return str[5 : len(str)-2] } // Process returns a relabeled version of the given label set. The relabel configurations @@ -273,6 +277,13 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. + if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + lb.Set(cfg.TargetLabel, cfg.Replacement) + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. if indexes == nil { @@ -322,3 +333,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 0f11f7068d..0c6d41f5e3 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -569,6 +569,29 @@ func TestRelabel(t *testing.T) { }, drop: true, }, + { + input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "d", + Separator: ";", + Replacement: "match${1}", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "d": "match", + }), + }, } for _, test := range tests { @@ -838,6 +861,34 @@ func BenchmarkRelabel(b *testing.B) { "__scrape_timeout__", "10s", "job", "kubernetes-pods"), }, + { + name: "static label pair", + config: ` + - replacement: wwwwww + target_label: wwwwww + - replacement: yyyyyyyyyyyy + target_label: xxxxxxxxx + - replacement: xxxxxxxxx + target_label: yyyyyyyyyyyy + - source_labels: ["something"] + target_label: with_source_labels + replacement: value + - replacement: dropped + target_label: ${0} + - replacement: ${0} + target_label: dropped`, + lbls: labels.FromStrings( + "abcdefg01", "hijklmn1", + "abcdefg02", "hijklmn2", + "abcdefg03", "hijklmn3", + "abcdefg04", "hijklmn4", + "abcdefg05", "hijklmn5", + "abcdefg06", "hijklmn6", + "abcdefg07", "hijklmn7", + "abcdefg08", "hijklmn8", + "job", "foo", + ), + }, } for i := range tests { err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs) @@ -900,3 +951,16 @@ action: replace }) } } + +func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) { + var zero Regexp + + marshalled, err := yaml.Marshal(&zero) + require.NoError(t, err) + require.Equal(t, "null\n", string(marshalled)) + + var unmarshalled Regexp + err = yaml.Unmarshal(marshalled, &unmarshalled) + require.NoError(t, err) + require.Nil(t, unmarshalled.Regexp) +} diff --git a/model/textparse/interface.go b/model/textparse/interface.go index df01dbc34f..0b5d9281e4 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -106,8 +106,8 @@ const ( EntryInvalid Entry = -1 EntryType Entry = 0 EntryHelp Entry = 1 - EntrySeries Entry = 2 // A series with a simple float64 as value. + EntrySeries Entry = 2 // EntrySeries marks a series with a simple float64 as value. EntryComment Entry = 3 EntryUnit Entry = 4 - EntryHistogram Entry = 5 // A series with a native histogram as a value. 
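The Replace fast path added above fires for rules whose output cannot depend on the regex at all: no source-label values (val == ""), the default regex, and no "$" capture references in either target_label or replacement, as detected by varInRegexTemplate. Such rules, like the static pairs in the new benchmark case, degenerate to a plain lb.Set. A hedged end-to-end sketch through the exported API (assuming relabel.Process keeps its (labels, keep) two-value signature):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	// A replace rule with no source_labels, the default regex, and a literal
	// replacement: exactly the shape the new fast path short-circuits.
	var cfgs []*relabel.Config
	cfg := `
- target_label: env
  replacement: prod
`
	if err := yaml.UnmarshalStrict([]byte(cfg), &cfgs); err != nil {
		panic(err)
	}

	out, _ := relabel.Process(labels.FromStrings("job", "api"), cfgs...)
	fmt.Println(out) // {env="prod", job="api"}
}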
+ EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value. ) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index b7ad1dd85c..ea7607c3a7 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -94,16 +94,46 @@ type OpenMetricsParser struct { exemplarVal float64 exemplarTs int64 hasExemplarTs bool + + skipCTSeries bool } -// NewOpenMetricsParser returns a new parser of the byte slice. -func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser { - return &OpenMetricsParser{ - l: &openMetricsLexer{b: b}, - builder: labels.NewScratchBuilderWithSymbolTable(st, 16), +type openMetricsParserOptions struct { + SkipCTSeries bool +} + +type OpenMetricsOption func(*openMetricsParserOptions) + +// WithOMParserCTSeriesSkipped turns off exposing _created lines +// as series, which makes those only used for parsing created timestamp +// for `CreatedTimestamp` method purposes. +// +// It's recommended to use this option to avoid using _created lines for other +// purposes than created timestamp, but leave false by default for the +// best-effort compatibility. +func WithOMParserCTSeriesSkipped() OpenMetricsOption { + return func(o *openMetricsParserOptions) { + o.SkipCTSeries = true } } +// NewOpenMetricsParser returns a new parser for the byte slice with option to skip CT series parsing. +func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser { + options := &openMetricsParserOptions{} + + for _, opt := range opts { + opt(options) + } + + parser := &OpenMetricsParser{ + l: &openMetricsLexer{b: b}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + skipCTSeries: options.SkipCTSeries, + } + + return parser +} + // Series returns the bytes of the series, the timestamp if set, and the value // of the current sample. func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { @@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } -// CreatedTimestamp returns nil as it's not implemented yet. -// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +// CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. +// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. func (p *OpenMetricsParser) CreatedTimestamp() *int64 { - return nil + if !typeRequiresCT(p.mtype) { + // Not a CT supported metric type, fast path. + return nil + } + + var ( + currLset labels.Labels + buf []byte + peekWithoutNameLsetHash uint64 + ) + p.Metric(&currLset) + currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") + // Search for the _created line for the currFamilyLsetHash using ephemeral parser until + // we see EOF or new metric family. We have to do it as we don't know where (and if) + // that CT line is. + // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + peek := deepCopy(p) + for { + eType, err := peek.Next() + if err != nil { + // This means peek will give error too later on, so def no CT line found. + // This might result in partial scrape with wrong/missing CT, but only + // spec improvement would help. + // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. 
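WithOMParserCTSeriesSkipped is a functional option: each OpenMetricsOption mutates openMetricsParserOptions before the parser is built. With it set, _created series are swallowed by Next and only surface through CreatedTimestamp. A sketch of the exported surface, with an input shaped like the repo's own test fixtures (the created-timestamp value follows this diff's int64 conversion of the parsed float):

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	input := []byte(`# TYPE foo counter
foo_total 17.0
foo_created 1520872607.123
# EOF
`)
	p := textparse.NewOpenMetricsParser(input, labels.NewSymbolTable(),
		textparse.WithOMParserCTSeriesSkipped())

	for {
		entry, err := p.Next()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			panic(err)
		}
		if entry != textparse.EntrySeries {
			continue
		}
		var lset labels.Labels
		p.Metric(&lset)
		// foo_created is not reported as a series here; it only feeds the
		// created timestamp of foo_total.
		if ct := p.CreatedTimestamp(); ct != nil {
			fmt.Println(lset.Get(labels.MetricName), "created:", *ct)
		}
	}
}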
+			return nil
+		}
+		if eType != EntrySeries {
+			// Assume we hit a different family; no CT line found.
+			return nil
+		}
+
+		var peekedLset labels.Labels
+		peek.Metric(&peekedLset)
+		peekedName := peekedLset.Get(model.MetricNameLabel)
+		if !strings.HasSuffix(peekedName, "_created") {
+			// Not a CT line, keep searching.
+			continue
+		}
+
+		// We found a CT line, but we must still check whether it actually belongs to our series (edge case).
+		peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
+		if peekWithoutNameLsetHash != currFamilyLsetHash {
+			// The CT line belongs to a different series; there is no CT for ours.
+			return nil
+		}
+		ct := int64(peek.val)
+		return &ct
+	}
+}
+
+// typeRequiresCT returns true if the metric type requires a _created timestamp.
+func typeRequiresCT(t model.MetricType) bool {
+	switch t {
+	case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
+		return true
+	default:
+		return false
+	}
+}
+
+// deepCopy creates a copy of a parser without re-using the slices' original memory addresses.
+func deepCopy(p *OpenMetricsParser) OpenMetricsParser {
+	newB := make([]byte, len(p.l.b))
+	copy(newB, p.l.b)
+
+	newLexer := &openMetricsLexer{
+		b:     newB,
+		i:     p.l.i,
+		start: p.l.start,
+		err:   p.l.err,
+		state: p.l.state,
+	}
+
+	newParser := OpenMetricsParser{
+		l:            newLexer,
+		builder:      p.builder,
+		mtype:        p.mtype,
+		val:          p.val,
+		skipCTSeries: false,
+	}
+	return newParser
+}
 
 // nextToken returns the next token from the openMetricsLexer.
@@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
 		}
 
 		p.series = p.l.b[p.start:p.l.i]
-		return p.parseMetricSuffix(p.nextToken())
+		if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil {
+			return EntryInvalid, err
+		}
+		if p.skipCTSeries && p.isCreatedSeries() {
+			return p.Next()
+		}
+		return EntrySeries, nil
 	case tMName:
 		p.offsets = append(p.offsets, p.start, p.l.i)
 		p.series = p.l.b[p.start:p.l.i]
@@ -351,8 +467,14 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
 			p.series = p.l.b[p.start:p.l.i]
 			t2 = p.nextToken()
 		}
-		return p.parseMetricSuffix(t2)
+		if err := p.parseSeriesEndOfLine(t2); err != nil {
+			return EntryInvalid, err
+		}
+		if p.skipCTSeries && p.isCreatedSeries() {
+			return p.Next()
+		}
+		return EntrySeries, nil
 	default:
 		err = p.parseError("expected a valid start token", t)
 	}
@@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e
 	}
 }
 
-// parseMetricSuffix parses the end of the line after the metric name and
-// labels. It starts parsing with the provided token.
-func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) {
+// isCreatedSeries returns true if the current series is a _created series.
+func (p *OpenMetricsParser) isCreatedSeries() bool {
+	var newLbs labels.Labels
+	p.Metric(&newLbs)
+	name := newLbs.Get(model.MetricNameLabel)
+	if typeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") {
+		return true
+	}
+	return false
+}
+
+// parseSeriesEndOfLine parses the end of a series line (value, optional
+// timestamp, comment, etc.) after the metric name and labels.
+// It starts parsing with the provided token.
+func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error { if p.offsets[0] == -1 { - return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) } var err error p.val, err = p.getFloatValue(t, "metric") if err != nil { - return EntryInvalid, err + return err } p.hasTS = false switch t2 := p.nextToken(); t2 { case tEOF: - return EntryInvalid, errors.New("data does not end with # EOF") + return errors.New("data does not end with # EOF") case tLinebreak: break case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } case tTimestamp: p.hasTS = true var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) + return fmt.Errorf("invalid timestamp %f", ts) } p.ts = int64(ts * 1000) switch t3 := p.nextToken(); t3 { case tLinebreak: case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } default: - return EntryInvalid, p.parseError("expected next entry after timestamp", t3) + return p.parseError("expected next entry after timestamp", t3) } } - return EntrySeries, nil + + return nil } func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index bc76a540d3..cadaabc99f 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -14,6 +14,7 @@ package textparse import ( + "errors" "io" "testing" @@ -24,6 +25,8 @@ import ( "github.com/prometheus/prometheus/model/labels" ) +func int64p(x int64) *int64 { return &x } + func TestOpenMetricsParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
# TYPE go_gc_duration_seconds summary @@ -63,15 +66,34 @@ ss{A="a"} 0 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 testmetric{label="\"bar\""} 1 +# HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter -foo_total 17.0 1520879607.789 # {id="counter-test"} 5` +foo_total 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created 1000 +foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created{a="b"} 1000 +# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far +# TYPE bar summary +bar_count 17.0 +bar_sum 324789.3 +bar{quantile="0.95"} 123.7 +bar{quantile="0.99"} 150.0 +bar_created 1520430000 +# HELP baz Histogram with the same objective as above's summary +# TYPE baz histogram +baz_bucket{le="0.0"} 0 +baz_bucket{le="+Inf"} 17 +baz_count 17 +baz_sum 324789.3 +baz_created 1520430000 +# HELP fizz_created Gauge which shouldn't be parsed as CT +# TYPE fizz_created gauge +fizz_created 17.0` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\n# EOF\n" - int64p := func(x int64) *int64 { return &x } - exp := []expectedParse{ { m: "go_gc_duration_seconds", @@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` m: "testmetric{label=\"\\\"bar\\\"\"}", v: 1, lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: "foo", + help: "Counter with and without labels to certify CT is parsed for both cases", }, { m: "foo", typ: model.MetricTypeCounter, @@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1000), + }, { + m: `foo_total{a="b"}`, + v: 17.0, + lset: labels.FromStrings("__name__", "foo_total", "a", "b"), + t: int64p(1520879607789), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1000), + }, { + m: "bar", + help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", + }, { + m: "bar", + typ: model.MetricTypeSummary, + }, { + m: "bar_count", + v: 17.0, + lset: labels.FromStrings("__name__", "bar_count"), + ct: int64p(1520430000), + }, { + m: "bar_sum", + v: 324789.3, + lset: labels.FromStrings("__name__", "bar_sum"), + ct: int64p(1520430000), + }, { + m: `bar{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), + ct: int64p(1520430000), + }, { + m: `bar{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), + ct: int64p(1520430000), + }, { + m: "baz", + help: "Histogram with the same objective as above's summary", + }, { + m: "baz", + typ: model.MetricTypeHistogram, + }, { + m: `baz_bucket{le="0.0"}`, + v: 0, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), + ct: int64p(1520430000), + }, { + m: `baz_bucket{le="+Inf"}`, + v: 17, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), + ct: int64p(1520430000), + }, { + m: `baz_count`, + v: 17, + lset: labels.FromStrings("__name__", "baz_count"), + ct: int64p(1520430000), + }, { + m: `baz_sum`, + v: 324789.3, + lset: labels.FromStrings("__name__", "baz_sum"), + ct: int64p(1520430000), + }, { + m: "fizz_created", + help: "Gauge which shouldn't be parsed as CT", + }, { + m: "fizz_created", + typ: model.MetricTypeGauge, + }, { + m: 
`fizz_created`,
+			v:    17,
+			lset: labels.FromStrings("__name__", "fizz_created"),
+		}, {
 			m:    "metric",
 			help: "foo\x00bar",
@@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
 		},
 	}
 
-	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
-	checkParseResults(t, p, exp)
+	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+	checkParseResultsWithCT(t, p, exp, true)
 }
 
 func TestUTF8OpenMetricsParse(t *testing.T) {
@@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
 # UNIT "go.gc_duration_seconds" seconds
 {"go.gc_duration_seconds",quantile="0"} 4.9351e-05
 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
+{"go.gc_duration_seconds_created"} 12313
 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
 {"http.status",q="0.9",a="b"} 8.3835e-05
 {"http.status",q="0.9",a="b"} 8.3835e-05
@@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
 			m:    `{"go.gc_duration_seconds",quantile="0"}`,
 			v:    4.9351e-05,
 			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
+			ct:   int64p(12313),
 		}, {
 			m:    `{"go.gc_duration_seconds",quantile="0.25"}`,
 			v:    7.424100000000001e-05,
 			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
+			ct:   int64p(12313),
 		}, {
 			m:    `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
 			v:    8.3835e-05,
@@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
 		},
 	}
 
-	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
-	checkParseResults(t, p, exp)
+	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+	checkParseResultsWithCT(t, p, exp, true)
 }
 
 func TestOpenMetricsParseErrors(t *testing.T) {
@@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) {
 			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
 			err:   `invalid exemplar timestamp -Inf`,
 		},
-		{
-			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
-			err:   `invalid exemplar timestamp +Inf`,
-		},
 	}
 
 	for i, c := range cases {
@@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) {
 		require.Equal(t, c.err, err.Error(), "test %d", i)
 	}
 }
+
+// While not desirable, there are cases where CT fails to parse and
+// these tests show them.
+// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
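
Before the failure cases below, a hedged sketch of the intended happy path (the `body` variable is illustrative; the parser API is as defined above). Note that the peek is forward-only: the ephemeral deep-copied parser scans from the current sample onward and gives up at the first non-series entry, which is exactly why _created lines that precede their samples, or sit behind a family boundary, are missed in the tests that follow.

	p := NewOpenMetricsParser(body, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
	for {
		entry, err := p.Next()
		if err != nil { // io.EOF terminates the exposition
			break
		}
		if entry != EntrySeries {
			continue
		}
		// Non-nil only for counter/summary/histogram families whose matching
		// _created line appears at or after this sample; the value is the raw
		// _created sample value.
		if ct := p.CreatedTimestamp(); ct != nil {
			_ = *ct
		}
	}
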
+func TestCTParseFailures(t *testing.T) { + input := `# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 17 +something_sum 324789.3 +something_created 1520430001 +something_bucket{le="0.0"} 0 +something_bucket{le="+Inf"} 17 +# HELP thing Histogram with _created as first line +# TYPE thing histogram +thing_created 1520430002 +thing_count 17 +thing_sum 324789.3 +thing_bucket{le="0.0"} 0 +thing_bucket{le="+Inf"} 17 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 17.0 +yum_sum 324789.3 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_created 1520430004 +foobar_count 17.0 +foobar_sum 324789.3 +foobar{quantile="0.95"} 123.7 +foobar{quantile="0.99"} 150.0` + + input += "\n# EOF\n" + + int64p := func(x int64) *int64 { return &x } + + type expectCT struct { + m string + ct *int64 + typ model.MetricType + help string + isErr bool + } + + exp := []expectCT{ + { + m: "something", + help: "Histogram with _created between buckets and summary", + isErr: false, + }, { + m: "something", + typ: model.MetricTypeHistogram, + isErr: false, + }, { + m: `something_count`, + ct: int64p(1520430001), + isErr: false, + }, { + m: `something_sum`, + ct: int64p(1520430001), + isErr: false, + }, { + m: `something_bucket{le="0.0"}`, + ct: int64p(1520430001), + isErr: true, + }, { + m: `something_bucket{le="+Inf"}`, + ct: int64p(1520430001), + isErr: true, + }, { + m: "thing", + help: "Histogram with _created as first line", + isErr: false, + }, { + m: "thing", + typ: model.MetricTypeHistogram, + isErr: false, + }, { + m: `thing_count`, + ct: int64p(1520430002), + isErr: true, + }, { + m: `thing_sum`, + ct: int64p(1520430002), + isErr: true, + }, { + m: `thing_bucket{le="0.0"}`, + ct: int64p(1520430002), + isErr: true, + }, { + m: `thing_bucket{le="+Inf"}`, + ct: int64p(1520430002), + isErr: true, + }, { + m: "yum", + help: "Summary with _created between summary and quantiles", + isErr: false, + }, { + m: "yum", + typ: model.MetricTypeSummary, + isErr: false, + }, { + m: "yum_count", + ct: int64p(1520430003), + isErr: false, + }, { + m: "yum_sum", + ct: int64p(1520430003), + isErr: false, + }, { + m: `yum{quantile="0.95"}`, + ct: int64p(1520430003), + isErr: true, + }, { + m: `yum{quantile="0.99"}`, + ct: int64p(1520430003), + isErr: true, + }, { + m: "foobar", + help: "Summary with _created as the first line", + isErr: false, + }, { + m: "foobar", + typ: model.MetricTypeSummary, + isErr: false, + }, { + m: "foobar_count", + ct: int64p(1520430004), + isErr: true, + }, { + m: "foobar_sum", + ct: int64p(1520430004), + isErr: true, + }, { + m: `foobar{quantile="0.95"}`, + ct: int64p(1520430004), + isErr: true, + }, { + m: `foobar{quantile="0.99"}`, + ct: int64p(1520430004), + isErr: true, + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + i := 0 + + var res labels.Labels + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + switch et { + case EntrySeries: + p.Metric(&res) + + if ct := p.CreatedTimestamp(); exp[i].isErr { + require.Nil(t, ct) + } else { + require.Equal(t, *exp[i].ct, *ct) + } + default: + i++ + continue + } + i++ + } +} + +func TestDeepCopy(t *testing.T) { + input := []byte(`# HELP go_goroutines A gauge goroutines. 
+# TYPE go_goroutines gauge +go_goroutines 33 123.123 +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds +go_gc_duration_seconds_created`) + + st := labels.NewSymbolTable() + parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser) + + // Modify the original parser state + _, err := parser.Next() + require.NoError(t, err) + require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) + require.True(t, parser.skipCTSeries) + + // Create a deep copy of the parser + copyParser := deepCopy(parser) + etype, err := copyParser.Next() + require.NoError(t, err) + require.Equal(t, EntryType, etype) + require.True(t, parser.skipCTSeries) + require.False(t, copyParser.skipCTSeries) + + // Modify the original parser further + parser.Next() + parser.Next() + parser.Next() + require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) + require.Equal(t, "summary", string(parser.mtype)) + require.False(t, copyParser.skipCTSeries) + require.True(t, parser.skipCTSeries) + + // Ensure the copy remains unchanged + copyParser.Next() + copyParser.Next() + require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) + require.False(t, copyParser.skipCTSeries) +} diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index a611f3aea7..5759769279 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -502,7 +502,7 @@ func unreplace(s string) string { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } func parseFloat(s string) (float64, error) { diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 66986291d7..ce9daf53e0 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -18,6 +18,7 @@ import ( "errors" "io" "os" + "strings" "testing" "github.com/klauspost/compress/gzip" @@ -41,6 +42,7 @@ type expectedParse struct { unit string comment string e *exemplar.Exemplar + ct *int64 } func TestPromParse(t *testing.T) { @@ -138,7 +140,7 @@ testmetric{label="\"bar\""} 1` v: 8.3835e-05, lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), }, { - // NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. This appears to be unintended and should probably be fixed. + // NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed. m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`, v: 8.3835e-05, lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"), @@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1` } func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { + checkParseResultsWithCT(t, p, exp, false) +} + +func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) { i := 0 var res labels.Labels @@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { p.Metric(&res) + if ctLinesRemoved { + // Are CT series skipped? 
+			_, typ := p.Type()
+			if typeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") {
+				t.Fatalf("expected created lines to be skipped")
+			}
+		}
+
 			require.Equal(t, exp[i].m, string(m))
 			require.Equal(t, exp[i].t, ts)
 			require.Equal(t, exp[i].v, v)
@@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
 				require.True(t, found)
 				testutil.RequireEqual(t, *exp[i].e, e)
 			}
+			if ct := p.CreatedTimestamp(); ct != nil {
+				require.Equal(t, *exp[i].ct, *ct)
+			} else {
+				require.Nil(t, exp[i].ct)
+			}
 
 		case EntryType:
 			m, typ := p.Type()
@@ -475,8 +494,10 @@ const (
 func BenchmarkParse(b *testing.B) {
 	for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
-		"prometheus":  NewPromParser,
-		"openmetrics": NewOpenMetricsParser,
+		"prometheus": NewPromParser,
+		"openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
+			return NewOpenMetricsParser(b, st)
+		},
 	} {
 		for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
 			f, err := os.Open(fn)
diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go
index ea3a2e1a34..9f1400cee3 100644
--- a/model/textparse/protobufparse.go
+++ b/model/textparse/protobufparse.go
@@ -47,7 +47,7 @@ import (
 // the re-arrangement work is actually causing problems (which has to be seen),
 // that expectation needs to be changed.
 type ProtobufParser struct {
-	in        []byte // The intput to parse.
+	in        []byte // The input to parse.
 	inPos     int    // Position within the input.
 	metricPos int    // Position within Metric slice.
 	// fieldPos is the position within a Summary or (legacy) Histogram. -2
@@ -71,7 +71,7 @@ type ProtobufParser struct {
 
 	mf *dto.MetricFamily
 
-	// Wether to also parse a classic histogram that is also present as a
+	// Whether to also parse a classic histogram that is also present as a
 	// native histogram.
 	parseClassicHistograms bool
 
@@ -409,6 +409,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
 	switch p.state {
 	case EntryInvalid:
 		p.metricPos = 0
+		p.exemplarPos = 0
 		p.fieldPos = -2
 		n, err := readDelimited(p.in[p.inPos:], p.mf)
 		p.inPos += n
@@ -485,6 +486,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
 			p.metricPos++
 			p.fieldPos = -2
 			p.fieldsDone = false
+			p.exemplarPos = 0
 			// If this is a metric family containing native
 			// histograms, we have to switch back to native
 			// histograms after parsing a classic histogram.
@@ -602,7 +604,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
 	return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
 }
 
-// formatOpenMetricsFloat works like the usual Go string formatting of a fleat
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
 // but appends ".0" if the resulting number would otherwise contain neither a
 // "." nor an "e".
 func formatOpenMetricsFloat(f float64) string {
diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go
index e323a6cc8f..cf34ae52df 100644
--- a/model/textparse/protobufparse_test.go
+++ b/model/textparse/protobufparse_test.go
@@ -695,6 +695,70 @@ metric: <
   timestamp_ms: 1234568
 >
 
+`,
+
+		`name: "test_histogram_with_native_histogram_exemplars2"
+help: "Another histogram with native histogram exemplars."
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + exemplars: < + label: < + name: "dummyID" + value: "59780" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + timestamp_ms: 1234568 +> + `, } @@ -1276,6 +1340,41 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, + { + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, }, }, { @@ -1995,15 +2094,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "without_quantiles_sum", ), }, - { // 78 + { // 81 m: "empty_histogram", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", }, - { // 79 + { // 82 m: "empty_histogram", typ: model.MetricTypeHistogram, }, - { // 80 + { // 83 m: "empty_histogram", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -2014,15 +2113,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, - { // 81 + { // 84 m: "test_counter_with_createdtimestamp", help: "A counter with a created timestamp.", }, - { // 82 + { // 85 m: "test_counter_with_createdtimestamp", typ: model.MetricTypeCounter, }, - { // 83 + { // 86 m: "test_counter_with_createdtimestamp", v: 42, ct: 1000, @@ -2030,15 +2129,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_counter_with_createdtimestamp", ), }, - { // 84 + { // 87 m: "test_summary_with_createdtimestamp", help: "A summary with a created timestamp.", }, - { // 85 + { // 88 m: "test_summary_with_createdtimestamp", typ: model.MetricTypeSummary, }, - { // 86 + { // 89 m: "test_summary_with_createdtimestamp_count", v: 42, ct: 1000, @@ -2046,7 +2145,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_summary_with_createdtimestamp_count", ), }, - { // 87 + { // 90 m: "test_summary_with_createdtimestamp_sum", v: 
1.234, ct: 1000, @@ -2054,15 +2153,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_summary_with_createdtimestamp_sum", ), }, - { // 88 + { // 91 m: "test_histogram_with_createdtimestamp", help: "A histogram with a created timestamp.", }, - { // 89 + { // 92 m: "test_histogram_with_createdtimestamp", typ: model.MetricTypeHistogram, }, - { // 90 + { // 93 m: "test_histogram_with_createdtimestamp", ct: 1000, shs: &histogram.Histogram{ @@ -2074,15 +2173,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_createdtimestamp", ), }, - { // 91 + { // 94 m: "test_gaugehistogram_with_createdtimestamp", help: "A gauge histogram with a created timestamp.", }, - { // 92 + { // 95 m: "test_gaugehistogram_with_createdtimestamp", typ: model.MetricTypeGaugeHistogram, }, - { // 93 + { // 96 m: "test_gaugehistogram_with_createdtimestamp", ct: 1000, shs: &histogram.Histogram{ @@ -2094,15 +2193,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_gaugehistogram_with_createdtimestamp", ), }, - { // 94 + { // 97 m: "test_histogram_with_native_histogram_exemplars", help: "A histogram with native histogram exemplars.", }, - { // 95 + { // 98 m: "test_histogram_with_native_histogram_exemplars", typ: model.MetricTypeHistogram, }, - { // 96 + { // 99 m: "test_histogram_with_native_histogram_exemplars", t: 1234568, shs: &histogram.Histogram{ @@ -2130,7 +2229,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, - { // 97 + { // 100 m: "test_histogram_with_native_histogram_exemplars_count", t: 1234568, v: 175, @@ -2138,7 +2237,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_native_histogram_exemplars_count", ), }, - { // 98 + { // 101 m: "test_histogram_with_native_histogram_exemplars_sum", t: 1234568, v: 0.0008280461746287094, @@ -2146,7 +2245,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_native_histogram_exemplars_sum", ), }, - { // 99 + { // 102 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", t: 1234568, v: 2, @@ -2155,7 +2254,7 @@ func TestProtobufParse(t *testing.T) { "le", "-0.0004899999999999998", ), }, - { // 100 + { // 103 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", t: 1234568, v: 4, @@ -2167,7 +2266,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 101 + { // 104 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", t: 1234568, v: 16, @@ -2179,7 +2278,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 102 + { // 105 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", t: 1234568, v: 175, @@ -2188,6 +2287,93 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, + { // 106 + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { // 107 + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { // 108 + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 
-161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 109 + m: "test_histogram_with_native_histogram_exemplars2_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_count", + ), + }, + { // 110 + m: "test_histogram_with_native_histogram_exemplars2_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_sum", + ), + }, + { // 111 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 112 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0003899999999999998", + ), + }, + { // 113 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0002899999999999998", + ), + }, + { // 114 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "+Inf", + ), + }, }, }, } diff --git a/notifier/notifier.go b/notifier/notifier.go index 68b0d4961e..218e4cb8c7 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b }() // Any HTTP status 2xx is OK. - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return fmt.Errorf("bad response status %s", resp.Status) } diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 2cdaa9e06d..68dd445811 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -711,7 +711,7 @@ func TestHangingNotifier(t *testing.T) { ) var ( - sendTimeout = 10 * time.Millisecond + sendTimeout = 100 * time.Millisecond sdUpdatert = sendTimeout / 2 done = make(chan struct{}) @@ -743,7 +743,7 @@ func TestHangingNotifier(t *testing.T) { // Initialize the discovery manager // This is relevant as the updates aren't sent continually in real life, but only each updatert. - // The old implementation of TestHangingNotifier didn't take that into acount. + // The old implementation of TestHangingNotifier didn't take that into account. ctx, cancel := context.WithCancel(context.Background()) defer cancel() reg := prometheus.NewRegistry() diff --git a/prompb/codec.go b/prompb/codec.go new file mode 100644 index 0000000000..ad30cd5e7b --- /dev/null +++ b/prompb/codec.go @@ -0,0 +1,201 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prompb + +import ( + "strings" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" +) + +// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon. + +// ToLabels return model labels.Labels from timeseries' remote labels. +func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels { + return labelProtosToLabels(b, m.GetLabels()) +} + +// ToLabels return model labels.Labels from timeseries' remote labels. +func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels { + return labelProtosToLabels(b, m.GetLabels()) +} + +func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels { + b.Reset() + for _, l := range labelPairs { + b.Add(l.Name, l.Value) + } + b.Sort() + return b.Labels() +} + +// FromLabels transforms labels into prompb labels. The buffer slice +// will be used to avoid allocations if it is big enough to store the labels. +func FromLabels(lbls labels.Labels, buf []Label) []Label { + result := buf[:0] + lbls.Range(func(l labels.Label) { + result = append(result, Label{ + Name: l.Name, + Value: l.Value, + }) + }) + return result +} + +// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. +func FromMetadataType(t model.MetricType) MetricMetadata_MetricType { + mt := strings.ToUpper(string(t)) + v, ok := MetricMetadata_MetricType_value[mt] + if !ok { + return MetricMetadata_UNKNOWN + } + return MetricMetadata_MetricType(v) +} + +// IsFloatHistogram returns true if the histogram is float. +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +// ToIntHistogram returns integer Prometheus histogram from the remote implementation +// of integer histogram. If it's a float histogram, the method returns nil. +func (h Histogram) ToIntHistogram() *histogram.Histogram { + if h.IsFloatHistogram() { + return nil + } + return &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountInt(), + Count: h.GetCountInt(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeDeltas(), + } +} + +// ToFloatHistogram returns float Prometheus histogram from the remote implementation +// of float histogram. If the underlying implementation is an integer histogram, a +// conversion is performed. 
+func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountFloat(), + Count: h.GetCountFloat(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeCounts(), + } + } + // Conversion from integer histogram. + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. +func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +// ToExemplar converts remote exemplar to model exemplar. 
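
The delta-to-counts conversion above is the only non-obvious step of the integer-to-float histogram fall-through; a small worked example (the deltas happen to match the negative buckets used in the tests):

	// Integer histograms store each bucket as a delta from the previous
	// bucket; float histograms store absolute counts, so a running sum
	// recovers them:
	//
	//	deltas: 1, 3, -2, -1, 1
	//	counts: 1, 4,  2,  1, 2
	counts := deltasToCounts([]int64{1, 3, -2, -1, 1}) // []float64{1, 4, 2, 1, 2}
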
+func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: labelProtosToLabels(b, m.GetLabels()), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/prompb/custom.go b/prompb/custom.go index 13d6e0f0cd..f73ddd446b 100644 --- a/prompb/custom.go +++ b/prompb/custom.go @@ -17,14 +17,6 @@ import ( "sync" ) -func (m Sample) T() int64 { return m.Timestamp } -func (m Sample) V() float64 { return m.Value } - -func (h Histogram) IsFloatHistogram() bool { - _, ok := h.GetCount().(*Histogram_CountFloat) - return ok -} - func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go new file mode 100644 index 0000000000..25fa0d4035 --- /dev/null +++ b/prompb/io/prometheus/write/v2/codec.go @@ -0,0 +1,216 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" +) + +// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon. + +// ToLabels return model labels.Labels from timeseries' remote labels. +func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels { + return desymbolizeLabels(b, m.GetLabelsRefs(), symbols) +} + +// ToMetadata return model metadata from timeseries' remote metadata. +func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata { + typ := model.MetricTypeUnknown + switch m.Metadata.Type { + case Metadata_METRIC_TYPE_COUNTER: + typ = model.MetricTypeCounter + case Metadata_METRIC_TYPE_GAUGE: + typ = model.MetricTypeGauge + case Metadata_METRIC_TYPE_HISTOGRAM: + typ = model.MetricTypeHistogram + case Metadata_METRIC_TYPE_GAUGEHISTOGRAM: + typ = model.MetricTypeGaugeHistogram + case Metadata_METRIC_TYPE_SUMMARY: + typ = model.MetricTypeSummary + case Metadata_METRIC_TYPE_INFO: + typ = model.MetricTypeInfo + case Metadata_METRIC_TYPE_STATESET: + typ = model.MetricTypeStateset + } + return metadata.Metadata{ + Type: typ, + Unit: symbols[m.Metadata.UnitRef], + Help: symbols[m.Metadata.HelpRef], + } +} + +// FromMetadataType transforms a Prometheus metricType into writev2 metricType. +// Since the former is a string we need to transform it to an enum. 
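
To make the *_ref resolution in ToMetadata above concrete, a small hypothetical sketch (the symbols slice and the reference values are made up; only index 0 being the empty string is mandated by the format):

	symbols := []string{"", "Total HTTP requests.", "seconds"} // hypothetical table
	ts := TimeSeries{Metadata: Metadata{
		Type:    Metadata_METRIC_TYPE_COUNTER,
		HelpRef: 1, // -> "Total HTTP requests."
		UnitRef: 0, // -> "", i.e. no unit
	}}
	meta := ts.ToMetadata(symbols)
	// meta == metadata.Metadata{Type: model.MetricTypeCounter,
	//                           Unit: "", Help: "Total HTTP requests."}
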
+func FromMetadataType(t model.MetricType) Metadata_MetricType { + switch t { + case model.MetricTypeCounter: + return Metadata_METRIC_TYPE_COUNTER + case model.MetricTypeGauge: + return Metadata_METRIC_TYPE_GAUGE + case model.MetricTypeHistogram: + return Metadata_METRIC_TYPE_HISTOGRAM + case model.MetricTypeGaugeHistogram: + return Metadata_METRIC_TYPE_GAUGEHISTOGRAM + case model.MetricTypeSummary: + return Metadata_METRIC_TYPE_SUMMARY + case model.MetricTypeInfo: + return Metadata_METRIC_TYPE_INFO + case model.MetricTypeStateset: + return Metadata_METRIC_TYPE_STATESET + default: + return Metadata_METRIC_TYPE_UNSPECIFIED + } +} + +// IsFloatHistogram returns true if the histogram is float. +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +// ToIntHistogram returns integer Prometheus histogram from the remote implementation +// of integer histogram. If it's a float histogram, the method returns nil. +func (h Histogram) ToIntHistogram() *histogram.Histogram { + if h.IsFloatHistogram() { + return nil + } + return &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountInt(), + Count: h.GetCountInt(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeDeltas(), + CustomValues: h.GetCustomValues(), + } +} + +// ToFloatHistogram returns float Prometheus histogram from the remote implementation +// of float histogram. If the underlying implementation is an integer histogram, a +// conversion is performed. +func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountFloat(), + Count: h.GetCountFloat(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeCounts(), + CustomValues: h.GetCustomValues(), + } + } + // Conversion from integer histogram. + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + CustomValues: h.GetCustomValues(), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. 
+func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + CustomValues: h.CustomValues, + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + CustomValues: fh.CustomValues, + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: desymbolizeLabels(b, m.LabelsRefs, symbols), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/prompb/io/prometheus/write/v2/custom.go b/prompb/io/prometheus/write/v2/custom.go new file mode 100644 index 0000000000..3aa778eb60 --- /dev/null +++ b/prompb/io/prometheus/write/v2/custom.go @@ -0,0 +1,165 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "slices" +) + +func (m Sample) T() int64 { return m.Timestamp } +func (m Sample) V() float64 { return m.Value } + +func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) { + siz := m.Size() + if cap(dst) < siz { + dst = make([]byte, siz) + } + n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz]) + if err != nil { + return nil, err + } + return dst[:n], nil +} + +// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer, +// but calls OptimizedMarshalToSizedBuffer on the timeseries. 
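
The TimeSeries variant further below packs labels_refs with a small in-place trick: each varint is emitted back-to-front while walking the buffer backwards, and the whole range is reversed once at the end. The same idea in isolation, on a single value (a standalone sketch, not patch code):

	buf := make([]byte, 8)
	i := len(buf)
	start := i
	num := uint64(300) // encodes to 0xac, 0x02 as a standard varint
	for num >= 1<<7 {
		buf[i-1] = uint8(num&0x7f | 0x80)
		num >>= 7
		i--
	}
	buf[i-1] = uint8(num)
	i--
	slices.Reverse(buf[i:start])
	// buf[i:start] is now []byte{0xac, 0x02}, with no temporary allocation.
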
+func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer, +// but marshals m.LabelsRefs in place without extra allocations. +func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + + if len(m.LabelsRefs) > 0 { + // This is the trick: encode the varints in reverse order to make it easier + // to do it in place. Then reverse the whole thing. + var j10 int + start := i + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + i-- + j10++ + } + dAtA[i-1] = uint8(num) + i-- + j10++ + } + slices.Reverse(dAtA[i:start]) + // --- end of trick + + i = encodeVarintTypes(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} diff --git a/prompb/io/prometheus/write/v2/custom_test.go b/prompb/io/prometheus/write/v2/custom_test.go new file mode 100644 index 0000000000..139cbfb225 --- /dev/null +++ b/prompb/io/prometheus/write/v2/custom_test.go @@ -0,0 +1,97 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package writev2 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOptimizedMarshal(t *testing.T) { + for _, tt := range []struct { + name string + m *Request + }{ + { + name: "empty", + m: &Request{}, + }, + { + name: "simple", + m: &Request{ + Timeseries: []TimeSeries{ + { + LabelsRefs: []uint32{ + 0, 1, + 2, 3, + 4, 5, + 6, 7, + 8, 9, + 10, 11, + 12, 13, + 14, 15, + }, + + Samples: []Sample{{Value: 1, Timestamp: 0}}, + Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}}, + Histograms: nil, + }, + { + LabelsRefs: []uint32{ + 0, 1, + 2, 3, + 4, 5, + 6, 7, + 8, 9, + 10, 11, + 12, 13, + 14, 15, + }, + Samples: []Sample{{Value: 2, Timestamp: 1}}, + Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}}, + Histograms: nil, + }, + }, + Symbols: []string{ + "a", "b", + "c", "d", + "e", "f", + "g", "h", + "i", "j", + "k", "l", + "m", "n", + "o", "p", + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + // Keep the slice allocated to mimic what std Marshal + // would give to sized Marshal. + got := make([]byte, 0) + + // Should be the same as the standard marshal. + expected, err := tt.m.Marshal() + require.NoError(t, err) + got, err = tt.m.OptimizedMarshal(got) + require.NoError(t, err) + require.Equal(t, expected, got) + + // Unmarshal should work too. + m := &Request{} + require.NoError(t, m.Unmarshal(got)) + require.Equal(t, tt.m, m) + }) + } +} diff --git a/prompb/io/prometheus/write/v2/symbols.go b/prompb/io/prometheus/write/v2/symbols.go new file mode 100644 index 0000000000..f316a976f2 --- /dev/null +++ b/prompb/io/prometheus/write/v2/symbols.go @@ -0,0 +1,83 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import "github.com/prometheus/prometheus/model/labels" + +// SymbolsTable implements table for easy symbol use. +type SymbolsTable struct { + strings []string + symbolsMap map[string]uint32 +} + +// NewSymbolTable returns a symbol table. +func NewSymbolTable() SymbolsTable { + return SymbolsTable{ + // Empty string is required as a first element. + symbolsMap: map[string]uint32{"": 0}, + strings: []string{""}, + } +} + +// Symbolize adds (if not added before) a string to the symbols table, +// while returning its reference number. +func (t *SymbolsTable) Symbolize(str string) uint32 { + if ref, ok := t.symbolsMap[str]; ok { + return ref + } + ref := uint32(len(t.strings)) + t.strings = append(t.strings, str) + t.symbolsMap[str] = ref + return ref +} + +// SymbolizeLabels symbolize Prometheus labels. 
+func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 {
+	result := buf[:0]
+	lbls.Range(func(l labels.Label) {
+		off := t.Symbolize(l.Name)
+		result = append(result, off)
+		off = t.Symbolize(l.Value)
+		result = append(result, off)
+	})
+	return result
+}
+
+// Symbols returns the computed symbols table, e.g. to put in Request.Symbols.
+// As per the spec, order does not matter.
+func (t *SymbolsTable) Symbols() []string {
+	return t.strings
+}
+
+// Reset clears the symbols table.
+func (t *SymbolsTable) Reset() {
+	// NOTE: Make sure to keep the empty symbol.
+	t.strings = t.strings[:1]
+	for k := range t.symbolsMap {
+		if k == "" {
+			continue
+		}
+		delete(t.symbolsMap, k)
+	}
+}
+
+// desymbolizeLabels decodes label references, using the given symbols, into labels.
+func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
+	b.Reset()
+	for i := 0; i < len(labelRefs); i += 2 {
+		b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
+	}
+	b.Sort()
+	return b.Labels()
+}
diff --git a/prompb/io/prometheus/write/v2/symbols_test.go b/prompb/io/prometheus/write/v2/symbols_test.go
new file mode 100644
index 0000000000..3d852e88f1
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/symbols_test.go
@@ -0,0 +1,60 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+func TestSymbolsTable(t *testing.T) {
+	s := NewSymbolTable()
+	require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
+	require.Equal(t, uint32(0), s.Symbolize(""))
+	require.Equal(t, []string{""}, s.Symbols())
+
+	require.Equal(t, uint32(1), s.Symbolize("abc"))
+	require.Equal(t, []string{"", "abc"}, s.Symbols())
+
+	require.Equal(t, uint32(2), s.Symbolize("__name__"))
+	require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols())
+
+	require.Equal(t, uint32(3), s.Symbolize("foo"))
+	require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols())
+
+	s.Reset()
+	require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
+	require.Equal(t, uint32(0), s.Symbolize(""))
+
+	require.Equal(t, uint32(1), s.Symbolize("__name__"))
+	require.Equal(t, []string{"", "__name__"}, s.Symbols())
+
+	require.Equal(t, uint32(2), s.Symbolize("abc"))
+	require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols())
+
+	ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234")
+	encoded := s.SymbolizeLabels(ls, nil)
+	require.Equal(t, []uint32{1, 3, 4, 5}, encoded)
+	b := labels.NewScratchBuilder(len(encoded))
+	decoded := desymbolizeLabels(&b, encoded, s.Symbols())
+	require.Equal(t, ls, decoded)
+
+	// Different buf.
+ ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234") + encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5}) + require.Equal(t, []uint32{1, 3, 6, 5}, encoded) +} diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go new file mode 100644 index 0000000000..3420d20e25 --- /dev/null +++ b/prompb/io/prometheus/write/v2/types.pb.go @@ -0,0 +1,3236 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: io/prometheus/write/v2/types.proto + +package writev2 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Metadata_MetricType int32 + +const ( + Metadata_METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0 + Metadata_METRIC_TYPE_COUNTER Metadata_MetricType = 1 + Metadata_METRIC_TYPE_GAUGE Metadata_MetricType = 2 + Metadata_METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3 + Metadata_METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4 + Metadata_METRIC_TYPE_SUMMARY Metadata_MetricType = 5 + Metadata_METRIC_TYPE_INFO Metadata_MetricType = 6 + Metadata_METRIC_TYPE_STATESET Metadata_MetricType = 7 +) + +var Metadata_MetricType_name = map[int32]string{ + 0: "METRIC_TYPE_UNSPECIFIED", + 1: "METRIC_TYPE_COUNTER", + 2: "METRIC_TYPE_GAUGE", + 3: "METRIC_TYPE_HISTOGRAM", + 4: "METRIC_TYPE_GAUGEHISTOGRAM", + 5: "METRIC_TYPE_SUMMARY", + 6: "METRIC_TYPE_INFO", + 7: "METRIC_TYPE_STATESET", +} + +var Metadata_MetricType_value = map[string]int32{ + "METRIC_TYPE_UNSPECIFIED": 0, + "METRIC_TYPE_COUNTER": 1, + "METRIC_TYPE_GAUGE": 2, + "METRIC_TYPE_HISTOGRAM": 3, + "METRIC_TYPE_GAUGEHISTOGRAM": 4, + "METRIC_TYPE_SUMMARY": 5, + "METRIC_TYPE_INFO": 6, + "METRIC_TYPE_STATESET": 7, +} + +func (x Metadata_MetricType) String() string { + return proto.EnumName(Metadata_MetricType_name, int32(x)) +} + +func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{4, 0} +} + +type Histogram_ResetHint int32 + +const ( + Histogram_RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0 + Histogram_RESET_HINT_YES Histogram_ResetHint = 1 + Histogram_RESET_HINT_NO Histogram_ResetHint = 2 + Histogram_RESET_HINT_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "RESET_HINT_UNSPECIFIED", + 1: "RESET_HINT_YES", + 2: "RESET_HINT_NO", + 3: "RESET_HINT_GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "RESET_HINT_UNSPECIFIED": 0, + "RESET_HINT_YES": 1, + "RESET_HINT_NO": 2, + "RESET_HINT_GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{5, 0} +} + +// Request represents a request to write the given timeseries to a remote destination. 
+// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// NOTE: gogoproto options might change in future for this file, they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908 +type Request struct { + // symbols contains a de-duplicated array of string elements used for various + // items in a Request message, like labels and metadata items. For the sender's convenience + // around empty values for optional fields like unit_ref, symbols array MUST start with + // empty string. + // + // To decode each of the symbolized strings, referenced, by "ref(s)" suffix, you + // need to lookup the actual string by index from symbols array. The order of + // strings is up to the sender. The receiver should not assume any particular encoding. + Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"` + // timeseries represents an array of distinct series with 0 or more samples. + Timeseries []TimeSeries `protobuf:"bytes,5,rep,name=timeseries,proto3" json:"timeseries"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetSymbols() []string { + if m != nil { + return m.Symbols + } + return nil +} + +func (m *Request) GetTimeseries() []TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +// TimeSeries represents a single series. +type TimeSeries struct { + // labels_refs is a list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // + // Note that there might be multiple TimeSeries objects in the same + // Requests with the same labels e.g. for different exemplars, metadata + // or created timestamp. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). 
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"` + // exemplars represents an optional set of exemplars attached to this series' samples. + Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"` + // metadata represents the metadata associated with the given series' samples. + Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + // + // Note that some receivers might require this and in return fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{1} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *TimeSeries) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + +func (m *TimeSeries) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *TimeSeries) GetMetadata() Metadata { + if m != nil { + return m.Metadata + } + return Metadata{} +} + +func (m *TimeSeries) GetCreatedTimestamp() int64 { + if m != nil { + return m.CreatedTimestamp + } + return 0 +} + +// Exemplar is an additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. 
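+// For example, an exemplar on a request-latency histogram may carry a
+// `trace_id` label (encoded via labels_refs below) that identifies the trace
+// behind an unusually slow request.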
+type Exemplar struct { + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's len is always + // a multiple of 2, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents the timestamp of the exemplar in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{2} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// Sample represents series sample. +type Sample struct { + // value of the sample. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. 
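+ // For illustration: 2021-01-01T00:00:00Z corresponds to the
+ // timestamp 1609459200000.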
+ Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{3} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// Metadata represents the metadata associated with the given series' samples. +type Metadata struct { + Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"` + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional, reference should point to an empty string in + // such a case. + HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"` + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional, reference should point to an empty string in + // such a case. 
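+ // For example, with Request.symbols ["", "seconds"], unit_ref = 1 denotes
+ // the unit "seconds", while unit_ref = 0 (the mandatory empty string) means
+ // no unit is set.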
+ UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{4} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return m.Size() +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetType() Metadata_MetricType { + if m != nil { + return m.Type + } + return Metadata_METRIC_TYPE_UNSPECIFIED +} + +func (m *Metadata) GetHelpRef() uint32 { + if m != nil { + return m.HelpRef + } + return 0 +} + +func (m *Metadata) GetUnitRef() uint32 { + if m != nil { + return m.UnitRef + } + return 0 +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be + // added in future for new bucketing layouts. + // + // The schema equal to -53 means custom buckets. See + // custom_values field description for more details. + // + // Values between -4 and 8 represent base-2 bucket schema, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n (n is schema value) logarithmic buckets. Or in other words, + // each bucket boundary is the previous boundary times 2^(2^-n). + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. 
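+ // For illustration: absolute integer bucket counts [3, 5, 4] are encoded
+ // as the deltas [3, 2, -1], each delta being taken against the previous
+ // bucket's count (starting from zero).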
+ NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
+ NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
+ // Positive Buckets.
+ //
+ // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
+ // * The span offset+length points to an index of the custom_values array,
+ // or to +Inf if pointing to the length of the array.
+ // * The counts and deltas have the same meaning as for exponential histograms.
+ PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
+ // Use either "positive_deltas" or "positive_counts", the former for
+ // regular histograms with integer counts, the latter for
+ // float histograms.
+ PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
+ PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
+ ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=io.prometheus.write.v2.Histogram_ResetHint" json:"reset_hint,omitempty"`
+ // timestamp represents timestamp of the sample in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // custom_values is an additional field used by non-exponential bucketing layouts.
+ //
+ // For custom buckets (-53 schema value) custom_values specify monotonically
+ // increasing upper inclusive boundaries for the bucket counts with arbitrary
+ // widths for this histogram. In other words, custom_values represents custom,
+ // explicit bucketing that could have been converted from the classic histograms.
+ //
+ // Those bounds are then referenced by spans in positive_spans with corresponding positive
+ // counts or deltas (refer to positive_spans for more details). This way we can
+ // encode sparse histograms with custom bucketing (many buckets are often
+ // not used).
+ //
+ // Note that for custom bounds, even negative observations are placed in the positive
+ // counts to simplify the implementation and avoid ambiguity of where to place
+ // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
+ // the zero bucket are unused, if the schema indicates custom bucketing.
+ //
+ // For each upper boundary the previous boundary represents the lower exclusive
+ // boundary for that bucket. The first element is the upper inclusive boundary
+ // for the first bucket, which implicitly has a lower inclusive bound of -Inf.
+ // This is similar to "le" label semantics on classic histograms. You may add a
+ // bucket with an upper bound of 0 to make sure that you really have no negative
+ // observations, but in practice, native histogram rendering will show a histogram
+ // with or without a first upper boundary of 0 (and no negative counts) as the same case.
+ //
+ // The last element is not only the upper inclusive bound of the last regular
+ // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
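+ //
+ // For illustration: custom_values [0.1, 0.5, 1] describes the buckets
+ // (-Inf, 0.1], (0.1, 0.5], (0.5, 1] and, implicitly, (1, +Inf], mirroring
+ // a classic histogram with le="0.1", le="0.5" and le="1".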
+ CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{5} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []BucketSpan { + if m != nil { + return m.NegativeSpans + } + 
return nil +} + +func (m *Histogram) GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_RESET_HINT_UNSPECIFIED +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Histogram) GetCustomValues() []float64 { + if m != nil { + return m.CustomValues + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{6} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func init() { + proto.RegisterEnum("io.prometheus.write.v2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value) + proto.RegisterEnum("io.prometheus.write.v2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) + proto.RegisterType((*Request)(nil), "io.prometheus.write.v2.Request") + proto.RegisterType((*TimeSeries)(nil), "io.prometheus.write.v2.TimeSeries") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.write.v2.Exemplar") + 
proto.RegisterType((*Sample)(nil), "io.prometheus.write.v2.Sample") + proto.RegisterType((*Metadata)(nil), "io.prometheus.write.v2.Metadata") + proto.RegisterType((*Histogram)(nil), "io.prometheus.write.v2.Histogram") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.write.v2.BucketSpan") +} + +func init() { + proto.RegisterFile("io/prometheus/write/v2/types.proto", fileDescriptor_f139519efd9fa8d7) +} + +var fileDescriptor_f139519efd9fa8d7 = []byte{ + // 926 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xed, 0xc4, 0x69, 0x3e, 0x6e, 0x9a, 0xac, 0x33, 0xb4, 0x5d, 0x6f, 0x81, 0x6c, 0xd6, 0x08, + 0x88, 0x58, 0x29, 0x91, 0xc2, 0xeb, 0x0a, 0xd4, 0xb4, 0x6e, 0x93, 0x95, 0x92, 0xac, 0x26, 0x2e, + 0x52, 0x79, 0xb1, 0xdc, 0x64, 0x92, 0x58, 0xd8, 0xb1, 0xf1, 0x4c, 0x02, 0xe5, 0xf7, 0xf1, 0xb0, + 0x8f, 0xfc, 0x01, 0x10, 0xf4, 0x9d, 0xff, 0x80, 0x66, 0xfc, 0xd9, 0x42, 0xbb, 0xe2, 0x6d, 0xe6, + 0xdc, 0x73, 0xee, 0x3d, 0xb9, 0xbe, 0x77, 0x02, 0xba, 0xe3, 0xf7, 0x82, 0xd0, 0xf7, 0x28, 0x5f, + 0xd3, 0x2d, 0xeb, 0xfd, 0x14, 0x3a, 0x9c, 0xf6, 0x76, 0xfd, 0x1e, 0xbf, 0x0d, 0x28, 0xeb, 0x06, + 0xa1, 0xcf, 0x7d, 0x7c, 0xec, 0xf8, 0xdd, 0x8c, 0xd3, 0x95, 0x9c, 0xee, 0xae, 0x7f, 0x72, 0xb8, + 0xf2, 0x57, 0xbe, 0xa4, 0xf4, 0xc4, 0x29, 0x62, 0xeb, 0x0c, 0xca, 0x84, 0xfe, 0xb8, 0xa5, 0x8c, + 0x63, 0x0d, 0xca, 0xec, 0xd6, 0xbb, 0xf1, 0x5d, 0xa6, 0x15, 0xdb, 0x4a, 0xa7, 0x4a, 0x92, 0x2b, + 0x1e, 0x02, 0x70, 0xc7, 0xa3, 0x8c, 0x86, 0x0e, 0x65, 0xda, 0x7e, 0x5b, 0xe9, 0xd4, 0xfa, 0x7a, + 0xf7, 0xbf, 0xeb, 0x74, 0x4d, 0xc7, 0xa3, 0x33, 0xc9, 0x1c, 0x14, 0xdf, 0xff, 0xf1, 0x72, 0x8f, + 0xe4, 0xb4, 0x6f, 0x8b, 0x15, 0xa4, 0x16, 0xf5, 0xbf, 0x0b, 0x00, 0x19, 0x0d, 0xbf, 0x84, 0x9a, + 0x6b, 0xdf, 0x50, 0x97, 0x59, 0x21, 0x5d, 0x32, 0x0d, 0xb5, 0x95, 0x4e, 0x9d, 0x40, 0x04, 0x11, + 0xba, 0x64, 0xf8, 0x1b, 0x28, 0x33, 0xdb, 0x0b, 0x5c, 0xca, 0xb4, 0x82, 0x2c, 0xde, 0x7a, 0xac, + 0xf8, 0x4c, 0xd2, 0xe2, 0xc2, 0x89, 0x08, 0x5f, 0x02, 0xac, 0x1d, 0xc6, 0xfd, 0x55, 0x68, 0x7b, + 0x4c, 0x53, 0x64, 0x8a, 0x57, 0x8f, 0xa5, 0x18, 0x26, 0xcc, 0xc4, 0x7e, 0x26, 0xc5, 0xe7, 0x50, + 0xa5, 0x3f, 0x53, 0x2f, 0x70, 0xed, 0x30, 0x6a, 0x52, 0xad, 0xdf, 0x7e, 0x2c, 0x8f, 0x11, 0x13, + 0xe3, 0x34, 0x99, 0x10, 0x0f, 0xa0, 0xe2, 0x51, 0x6e, 0x2f, 0x6c, 0x6e, 0x6b, 0xfb, 0x6d, 0xf4, + 0x54, 0x92, 0x71, 0xcc, 0x8b, 0x93, 0xa4, 0x3a, 0xfc, 0x1a, 0x9a, 0xf3, 0x90, 0xda, 0x9c, 0x2e, + 0x2c, 0xd9, 0x5e, 0x6e, 0x7b, 0x81, 0x56, 0x6a, 0xa3, 0x8e, 0x42, 0xd4, 0x38, 0x60, 0x26, 0xb8, + 0x6e, 0x41, 0x25, 0x71, 0xf3, 0xe1, 0x66, 0x1f, 0xc2, 0xfe, 0xce, 0x76, 0xb7, 0x54, 0x2b, 0xb4, + 0x51, 0x07, 0x91, 0xe8, 0x82, 0x3f, 0x81, 0x6a, 0x56, 0x47, 0x91, 0x75, 0x32, 0x40, 0x7f, 0x03, + 0xa5, 0xa8, 0xf3, 0x99, 0x1a, 0x3d, 0xaa, 0x2e, 0x3c, 0x54, 0xff, 0x55, 0x80, 0x4a, 0xf2, 0x43, + 0xf1, 0xb7, 0x50, 0x14, 0xd3, 0x2c, 0xf5, 0x8d, 0xfe, 0xeb, 0x0f, 0x35, 0x46, 0x1c, 0x42, 0x67, + 0x6e, 0xde, 0x06, 0x94, 0x48, 0x21, 0x7e, 0x01, 0x95, 0x35, 0x75, 0x03, 0xf1, 0xf3, 0xa4, 0xd1, + 0x3a, 0x29, 0x8b, 0x3b, 0xa1, 0x4b, 0x11, 0xda, 0x6e, 0x1c, 0x2e, 0x43, 0xc5, 0x28, 0x24, 0xee, + 0x84, 0x2e, 0xf5, 0xdf, 0x11, 0x40, 0x96, 0x0a, 0x7f, 0x0c, 0xcf, 0xc7, 0x86, 0x49, 0x46, 0x67, + 0x96, 0x79, 0xfd, 0xce, 0xb0, 0xae, 0x26, 0xb3, 0x77, 0xc6, 0xd9, 0xe8, 0x62, 0x64, 0x9c, 0xab, + 0x7b, 0xf8, 0x39, 0x7c, 0x94, 0x0f, 0x9e, 0x4d, 0xaf, 0x26, 0xa6, 0x41, 0x54, 0x84, 0x8f, 0xa0, + 0x99, 0x0f, 0x5c, 0x9e, 0x5e, 0x5d, 0x1a, 0x6a, 0x01, 0xbf, 0x80, 0xa3, 0x3c, 0x3c, 0x1c, 0xcd, + 0xcc, 
0xe9, 0x25, 0x39, 0x1d, 0xab, 0x0a, 0x6e, 0xc1, 0xc9, 0xbf, 0x14, 0x59, 0xbc, 0xf8, 0xb0, + 0xd4, 0xec, 0x6a, 0x3c, 0x3e, 0x25, 0xd7, 0xea, 0x3e, 0x3e, 0x04, 0x35, 0x1f, 0x18, 0x4d, 0x2e, + 0xa6, 0x6a, 0x09, 0x6b, 0x70, 0x78, 0x8f, 0x6e, 0x9e, 0x9a, 0xc6, 0xcc, 0x30, 0xd5, 0xb2, 0xfe, + 0x6b, 0x09, 0xaa, 0xe9, 0x64, 0xe3, 0x4f, 0xa1, 0x3a, 0xf7, 0xb7, 0x1b, 0x6e, 0x39, 0x1b, 0x2e, + 0x3b, 0x5d, 0x1c, 0xee, 0x91, 0x8a, 0x84, 0x46, 0x1b, 0x8e, 0x5f, 0x41, 0x2d, 0x0a, 0x2f, 0x5d, + 0xdf, 0xe6, 0xd1, 0x20, 0x0c, 0xf7, 0x08, 0x48, 0xf0, 0x42, 0x60, 0x58, 0x05, 0x85, 0x6d, 0x3d, + 0xd9, 0x60, 0x44, 0xc4, 0x11, 0x1f, 0x43, 0x89, 0xcd, 0xd7, 0xd4, 0xb3, 0x65, 0x6b, 0x9b, 0x24, + 0xbe, 0xe1, 0xcf, 0xa1, 0xf1, 0x0b, 0x0d, 0x7d, 0x8b, 0xaf, 0x43, 0xca, 0xd6, 0xbe, 0xbb, 0x90, + 0x33, 0x8f, 0x48, 0x5d, 0xa0, 0x66, 0x02, 0xe2, 0x2f, 0x62, 0x5a, 0xe6, 0xab, 0x24, 0x7d, 0x21, + 0x72, 0x20, 0xf0, 0xb3, 0xc4, 0xdb, 0x57, 0xa0, 0xe6, 0x78, 0x91, 0xc1, 0xb2, 0x34, 0x88, 0x48, + 0x23, 0x65, 0x46, 0x26, 0xa7, 0xd0, 0xd8, 0xd0, 0x95, 0xcd, 0x9d, 0x1d, 0xb5, 0x58, 0x60, 0x6f, + 0x98, 0x56, 0x79, 0xfa, 0xed, 0x1a, 0x6c, 0xe7, 0x3f, 0x50, 0x3e, 0x0b, 0xec, 0x4d, 0xbc, 0x70, + 0xf5, 0x44, 0x2f, 0x30, 0x86, 0xbf, 0x84, 0x67, 0x69, 0xc2, 0x05, 0x75, 0xb9, 0xcd, 0xb4, 0x6a, + 0x5b, 0xe9, 0x60, 0x92, 0xd6, 0x39, 0x97, 0xe8, 0x3d, 0xa2, 0x74, 0xca, 0x34, 0x68, 0x2b, 0x1d, + 0x94, 0x11, 0xa5, 0x4d, 0x26, 0x2c, 0x06, 0x3e, 0x73, 0x72, 0x16, 0x6b, 0xff, 0xd7, 0x62, 0xa2, + 0x4f, 0x2d, 0xa6, 0x09, 0x63, 0x8b, 0x07, 0x91, 0xc5, 0x04, 0xce, 0x2c, 0xa6, 0xc4, 0xd8, 0x62, + 0x3d, 0xb2, 0x98, 0xc0, 0xb1, 0xc5, 0xb7, 0x00, 0x21, 0x65, 0x94, 0x5b, 0x6b, 0xf1, 0x55, 0x1a, + 0x4f, 0xef, 0x65, 0x3a, 0x63, 0x5d, 0x22, 0x34, 0x43, 0x67, 0xc3, 0x49, 0x35, 0x4c, 0x8e, 0xf7, + 0x1f, 0x82, 0x67, 0x0f, 0x1e, 0x02, 0xfc, 0x19, 0xd4, 0xe7, 0x5b, 0xc6, 0x7d, 0xcf, 0x92, 0xcf, + 0x06, 0xd3, 0x54, 0x69, 0xe8, 0x20, 0x02, 0xbf, 0x93, 0x98, 0xbe, 0x80, 0x6a, 0x9a, 0x1a, 0x9f, + 0xc0, 0x31, 0x11, 0x13, 0x6e, 0x0d, 0x47, 0x13, 0xf3, 0xc1, 0x9a, 0x62, 0x68, 0xe4, 0x62, 0xd7, + 0xc6, 0x4c, 0x45, 0xb8, 0x09, 0xf5, 0x1c, 0x36, 0x99, 0xaa, 0x05, 0xb1, 0x49, 0x39, 0x28, 0xda, + 0x59, 0x65, 0x50, 0x86, 0x7d, 0xd9, 0x94, 0xc1, 0x01, 0x40, 0x36, 0x6f, 0xfa, 0x1b, 0x80, 0xec, + 0x03, 0x88, 0x91, 0xf7, 0x97, 0x4b, 0x46, 0xa3, 0x1d, 0x6a, 0x92, 0xf8, 0x26, 0x70, 0x97, 0x6e, + 0x56, 0x7c, 0x2d, 0x57, 0xa7, 0x4e, 0xe2, 0xdb, 0xe0, 0xe8, 0xfd, 0x5d, 0x0b, 0xfd, 0x76, 0xd7, + 0x42, 0x7f, 0xde, 0xb5, 0xd0, 0xf7, 0x65, 0xd9, 0xb4, 0x5d, 0xff, 0xa6, 0x24, 0xff, 0x8a, 0xbf, + 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xfc, 0x93, 0x1c, 0xde, 0x07, 0x00, 0x00, +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 
0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.LabelsRefs) > 0 { + dAtA3 := make([]byte, len(m.LabelsRefs)*10) + var j2 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA3[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.LabelsRefs) > 0 { + dAtA5 := make([]byte, len(m.LabelsRefs)*10) + var j4 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintTypes(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func 
(m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.UnitRef != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.UnitRef)) + i-- + dAtA[i] = 0x20 + } + if m.HelpRef != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.HelpRef)) + i-- + dAtA[i] = 0x18 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f7 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f7)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j8 int + dAtA10 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x9 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x9 >= 1<<7 { + dAtA10[j8] = uint8(uint64(x9)&0x7f | 0x80) + j8++ + x9 >>= 7 + } + dAtA10[j8] = uint8(x9) + j8++ + } + i -= j8 + copy(dAtA[i:], dAtA10[:j8]) + i = 
encodeVarintTypes(dAtA, i, uint64(j8)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f11 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j12 int + dAtA14 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x13 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x13 >= 1<<7 { + dAtA14[j12] = uint8(uint64(x13)&0x7f | 0x80) + j12++ + x13 >>= 7 + } + dAtA14[j12] = uint8(x13) + j12++ + } + i -= j12 + copy(dAtA[i:], dAtA14[:j12]) + i = encodeVarintTypes(dAtA, i, uint64(j12)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + 
i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Symbols) > 0 { + for _, s := range m.Symbols { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.Metadata.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.CreatedTimestamp != 0 { + n += 1 + sovTypes(uint64(m.CreatedTimestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.HelpRef != 0 { + n += 1 + sovTypes(uint64(m.HelpRef)) + } + if m.UnitRef != 0 { + n += 1 + sovTypes(uint64(m.UnitRef)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if len(m.CustomValues) > 0 { + n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", 
wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Metadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) + } + m.HelpRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HelpRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType) + } + m.UnitRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.UnitRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, 
elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto new file mode 100644 index 0000000000..ff6c4936bb --- /dev/null +++ b/prompb/io/prometheus/write/v2/types.proto @@ -0,0 +1,255 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2 + +syntax = "proto3"; +package io.prometheus.write.v2; + +option go_package = "writev2"; + +import "gogoproto/gogo.proto"; + +// Request represents a request to write the given timeseries to a remote destination. +// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// NOTE: gogoproto options might change in future for this file, they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). 
See: https://github.com/prometheus/prometheus/issues/11908
+message Request {
+  // Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the first three
+  // field numbers for deterministic interop between those two messages; see types_test.go for details.
+  // Generally this is not needed, because Receivers must use the Content-Type header, but we want to
+  // be sympathetic to adopters with mistaken implementations and have a deterministic error (an empty
+  // message if you use the wrong proto schema).
+  reserved 1 to 3;
+
+  // symbols contains a de-duplicated array of string elements used for various
+  // items in a Request message, like labels and metadata items. For the sender's convenience
+  // around empty values for optional fields like unit_ref, the symbols array MUST start with
+  // an empty string.
+  //
+  // To decode each of the symbolized strings, referenced by the "ref(s)" suffix, you
+  // need to look up the actual string by index in the symbols array. The order of
+  // strings is up to the sender. The receiver should not assume any particular encoding.
+  repeated string symbols = 4;
+  // timeseries represents an array of distinct series with 0 or more samples.
+  repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false];
+}
+
+// TimeSeries represents a single series.
+message TimeSeries {
+  // labels_refs is a list of label name-value pair references, encoded
+  // as indices to the Request.symbols array. This list's length is always
+  // a multiple of two, and the underlying labels should be sorted lexicographically.
+  //
+  // Note that there might be multiple TimeSeries objects in the same
+  // Request with the same labels, e.g. for different exemplars, metadata
+  // or created timestamp.
+  repeated uint32 labels_refs = 1;
+
+  // Timeseries messages can either specify samples or (native) histogram samples
+  // (histogram field), but not both. For a typical sender (real-time metric
+  // streaming), in healthy cases, there will be only one sample or histogram.
+  //
+  // Samples and histograms are sorted by timestamp (older first).
+  repeated Sample samples = 2 [(gogoproto.nullable) = false];
+  repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
+
+  // exemplars represents an optional set of exemplars attached to this series' samples.
+  repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];
+
+  // metadata represents the metadata associated with the given series' samples.
+  Metadata metadata = 5 [(gogoproto.nullable) = false];
+
+  // created_timestamp represents an optional created timestamp associated with
+  // this series' samples in ms format, typically for counter or histogram type
+  // metrics. Created timestamp represents the time when the counter started
+  // counting (sometimes referred to as start timestamp), which can increase
+  // the accuracy of query results.
+  //
+  // Note that some receivers might require this and, in its absence, fail to
+  // ingest such samples within the Request.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion between time.Time and Prometheus timestamp.
+  //
+  // Note that the "optional" keyword is omitted due to
+  // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+  // Zero value means value not set. If you need to use exactly the zero value for
+  // the timestamp, use 1 millisecond before or after.
+  int64 created_timestamp = 6;
+}
+
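As a quick illustration of the symbols indirection above, here is a minimal Go sketch (not part of this change) that encodes the series {__name__="metric1", foo="bar"}; it leans on the NewSymbolTable/Symbolize/Symbols helpers exercised by codec_test.go further down in this diff, and the sample value is made up:

    package main

    import (
        "fmt"

        writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
    )

    func main() {
        s := writev2.NewSymbolTable() // Index 0 always holds the mandatory empty string.
        ts := writev2.TimeSeries{
            // Interleaved name/value references: "__name__"="metric1", "foo"="bar".
            LabelsRefs: []uint32{
                s.Symbolize("__name__"), s.Symbolize("metric1"),
                s.Symbolize("foo"), s.Symbolize("bar"),
            },
            Samples: []writev2.Sample{{Value: 21.4, Timestamp: 1700000000000}},
        }
        req := &writev2.Request{Symbols: s.Symbols(), Timeseries: []writev2.TimeSeries{ts}}
        fmt.Printf("symbols=%q refs=%v\n", req.Symbols, ts.LabelsRefs)
        // symbols=["" "__name__" "metric1" "foo" "bar"] refs=[1 2 3 4]
    }
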
+// Exemplar is additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+message Exemplar {
+  // labels_refs is an optional list of label name-value pair references, encoded
+  // as indices to the Request.symbols array. This list's length is always
+  // a multiple of two, and the underlying labels should be sorted lexicographically.
+  // If the exemplar references a trace, it should use the `trace_id` label name, as a best practice.
+  repeated uint32 labels_refs = 1;
+  // value represents an exact example value. This can be useful when the exemplar
+  // is attached to a histogram, which only gives an estimated value through buckets.
+  double value = 2;
+  // timestamp represents the timestamp of the exemplar in ms.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion between time.Time and Prometheus timestamp.
+  int64 timestamp = 3;
+}
+
+// Sample represents a series sample.
+message Sample {
+  // value of the sample.
+  double value = 1;
+  // timestamp represents the timestamp of the sample in ms.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion between time.Time and Prometheus timestamp.
+  int64 timestamp = 2;
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+message Metadata {
+  enum MetricType {
+    METRIC_TYPE_UNSPECIFIED = 0;
+    METRIC_TYPE_COUNTER = 1;
+    METRIC_TYPE_GAUGE = 2;
+    METRIC_TYPE_HISTOGRAM = 3;
+    METRIC_TYPE_GAUGEHISTOGRAM = 4;
+    METRIC_TYPE_SUMMARY = 5;
+    METRIC_TYPE_INFO = 6;
+    METRIC_TYPE_STATESET = 7;
+  }
+  MetricType type = 1;
+  // help_ref is a reference to the Request.symbols array representing help
+  // text for the metric. Help is optional; the reference should point to an
+  // empty string in such a case.
+  uint32 help_ref = 3;
+  // unit_ref is a reference to the Request.symbols array representing a unit
+  // for the metric. Unit is optional; the reference should point to an
+  // empty string in such a case.
+  uint32 unit_ref = 4;
+}
+
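The help_ref and unit_ref fields reuse the same symbols indirection as labels. A hedged fragment, building on the sketch above (the help text is invented; ref 0 deliberately points at the mandatory leading empty string to mean "unset"):

    meta := writev2.Metadata{
        Type:    writev2.Metadata_METRIC_TYPE_COUNTER,
        HelpRef: s.Symbolize("Total number of HTTP requests."),
        UnitRef: 0, // symbols[0] == "", i.e. no unit.
    }
    // The refs only resolve if s.Symbols() is shipped as Request.Symbols alongside.
    ts.Metadata = meta
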
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both: the usual
+// integer histogram as well as a float histogram.
+message Histogram {
+  enum ResetHint {
+    RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
+    RESET_HINT_YES = 1; // This is the 1st histogram after a counter reset.
+    RESET_HINT_NO = 2; // There was no counter reset between this and the previous Histogram.
+    RESET_HINT_GAUGE = 3; // This is a gauge histogram where counter resets don't happen.
+  }
+
+  oneof count { // Count of observations in the histogram.
+    uint64 count_int = 1;
+    double count_float = 2;
+  }
+  double sum = 3; // Sum of observations in the histogram.
+
+  // The schema defines the bucket schema. Currently, valid numbers
+  // are -53 and numbers in the range of -4 <= n <= 8. More valid numbers might be
+  // added in future for new bucketing layouts.
+  //
+  // A schema equal to -53 means custom buckets. See the
+  // custom_values field description for more details.
+  //
+  // Values between -4 and 8 represent a base-2 bucket schema, where 1
+  // is a bucket boundary in each case, and then each power of two is
+  // divided into 2^n (n is the schema value) logarithmic buckets. Or, in other
+  // words, each bucket boundary is the previous boundary times 2^(2^-n).
+  // For example, schema=3 splits each power of two into 2^3=8 buckets, so
+  // consecutive boundaries differ by a factor of 2^(1/8), roughly 1.09.
+  sint32 schema = 4;
+  double zero_threshold = 5; // Breadth of the zero bucket.
+  oneof zero_count { // Count in zero bucket.
+    uint64 zero_count_int = 6;
+    double zero_count_float = 7;
+  }
+
+  // Negative Buckets.
+  repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
+  // Use either "negative_deltas" or "negative_counts", the former for
+  // regular histograms with integer counts, the latter for
+  // float histograms.
+  repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_counts = 10; // Absolute count of each bucket.
+
+  // Positive Buckets.
+  //
+  // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
+  // * The span offset+length points to the index of the custom_values array,
+  //   or to +Inf if it points to the length of the array.
+  // * The counts and deltas have the same meaning as for exponential histograms.
+  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
+  // Use either "positive_deltas" or "positive_counts", the former for
+  // regular histograms with integer counts, the latter for
+  // float histograms.
+  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_counts = 13; // Absolute count of each bucket.
+
+  ResetHint reset_hint = 14;
+  // timestamp represents the timestamp of the sample in ms.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion between time.Time and Prometheus timestamp.
+  int64 timestamp = 15;
+
+  // custom_values is an additional field used by non-exponential bucketing layouts.
+  //
+  // For custom buckets (-53 schema value) custom_values specify monotonically
+  // increasing upper inclusive boundaries for the bucket counts with arbitrary
+  // widths for this histogram. In other words, custom_values represents custom,
+  // explicit bucketing that could have been converted from the classic histograms.
+  //
+  // Those bounds are then referenced by spans in positive_spans with corresponding positive
+  // counts or deltas (refer to positive_spans for more details). This way we can
+  // encode sparse histograms with custom bucketing (many buckets are often
+  // not used).
+  //
+  // Note that for custom bounds, even negative observations are placed in the positive
+  // counts to simplify the implementation and avoid the ambiguity of where to place
+  // an underflow bucket, e.g. (-2, 1]. Therefore the negative buckets and
+  // the zero bucket are unused if the schema indicates custom bucketing.
+  //
+  // For each upper boundary, the previous boundary represents the lower exclusive
+  // boundary for that bucket. The first element is the upper inclusive boundary
+  // for the first bucket, which implicitly has a lower inclusive bound of -Inf.
+  // This is similar to "le" label semantics on classic histograms. You may add a
+  // bucket with an upper bound of 0 to make sure that you really have no negative
+  // observations, but in practice, native histogram rendering will treat a histogram
+  // with a first upper boundary of 0 and no negative counts the same as one without it.
+  //
+  // The last element is not only the upper inclusive bound of the last regular
+  // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
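+  //
+  // As a purely illustrative (non-normative) example: a classic histogram with
+  // buckets le=0.5, le=1, le=2 and bucket counts 5, 3, 0, 2 (the last being +Inf)
+  // could be sent as schema=-53, custom_values=[0.5, 1, 2], a single positive
+  // span {offset: 0, length: 4}, and positive_deltas=[5, -2, -3, 2], where the
+  // span index equal to len(custom_values) denotes the +Inf bucket.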
+ repeated double custom_values = 16; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} diff --git a/prompb/io/prometheus/write/v2/types_test.go b/prompb/io/prometheus/write/v2/types_test.go new file mode 100644 index 0000000000..5b7622fc2f --- /dev/null +++ b/prompb/io/prometheus/write/v2/types_test.go @@ -0,0 +1,97 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/prompb" +) + +func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) { + expectedV1Empty := &prompb.WriteRequest{} + for _, tc := range []struct{ incoming *Request }{ + { + incoming: &Request{}, // Technically wrong, should be at least empty string in symbol. + }, + { + incoming: &Request{ + Symbols: []string{""}, + }, // NOTE: Without reserved fields, failed with "corrupted" ghost TimeSeries element. + }, + { + incoming: &Request{ + Symbols: []string{"", "__name__", "metric1"}, + Timeseries: []TimeSeries{ + {LabelsRefs: []uint32{1, 2}}, + {Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}}, + }, // NOTE: Without reserved fields, proto: illegal wireType 7 + }, + }, + } { + t.Run("", func(t *testing.T) { + in, err := proto.Marshal(tc.incoming) + require.NoError(t, err) + + // Test accidental unmarshal of v2 payload with v1 proto. + out := &prompb.WriteRequest{} + require.NoError(t, proto.Unmarshal(in, out)) + + // Drop unknowns, we expect them when incoming payload had some fields. + // This field & method will be likely gone after gogo removal. + out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables. + + require.Equal(t, expectedV1Empty, out) + }) + } +} + +func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) { + expectedV2Empty := &Request{} + for _, tc := range []struct{ incoming *prompb.WriteRequest }{ + { + incoming: &prompb.WriteRequest{}, + }, + { + incoming: &prompb.WriteRequest{ + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}}, + Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}, + }, + }, + }, + // NOTE: Without reserved fields, results in corrupted v2.Request.Symbols. + }, + } { + t.Run("", func(t *testing.T) { + in, err := proto.Marshal(tc.incoming) + require.NoError(t, err) + + // Test accidental unmarshal of v1 payload with v2 proto. 
+ out := &Request{} + require.NoError(t, proto.Unmarshal(in, out)) + + // Drop unknowns, we expect them when incoming payload had some fields. + // This field & method will be likely gone after gogo removal. + out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables. + + require.Equal(t, expectedV2Empty, out) + }) + } +} diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go new file mode 100644 index 0000000000..2ab95e0d19 --- /dev/null +++ b/prompb/rwcommon/codec_test.go @@ -0,0 +1,298 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rwcommon + +import ( + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" +) + +func TestToLabels(t *testing.T) { + expected := labels.FromStrings("__name__", "metric1", "foo", "bar") + + t.Run("v1", func(t *testing.T) { + ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}} + b := labels.NewScratchBuilder(2) + require.Equal(t, expected, ts.ToLabels(&b, nil)) + require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil)) + require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels)) + }) + t.Run("v2", func(t *testing.T) { + v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"} + ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}} + b := labels.NewScratchBuilder(2) + require.Equal(t, expected, ts.ToLabels(&b, v2Symbols)) + // No need for FromLabels in our prod code as we use symbol table to do so. 
+ }) +} + +func TestFromMetadataType(t *testing.T) { + for _, tc := range []struct { + desc string + input model.MetricType + expectedV1 prompb.MetricMetadata_MetricType + expectedV2 writev2.Metadata_MetricType + }{ + { + desc: "with a single-word metric", + input: model.MetricTypeCounter, + expectedV1: prompb.MetricMetadata_COUNTER, + expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER, + }, + { + desc: "with a two-word metric", + input: model.MetricTypeStateset, + expectedV1: prompb.MetricMetadata_STATESET, + expectedV2: writev2.Metadata_METRIC_TYPE_STATESET, + }, + { + desc: "with an unknown metric", + input: "not-known", + expectedV1: prompb.MetricMetadata_UNKNOWN, + expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + t.Run("v1", func(t *testing.T) { + require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input)) + }) + t.Run("v2", func(t *testing.T) { + require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input)) + }) + }) + } +} + +func TestToMetadata(t *testing.T) { + sym := writev2.NewSymbolTable() + + for _, tc := range []struct { + input writev2.Metadata + expected metadata.Metadata + }{ + { + input: writev2.Metadata{}, + expected: metadata.Metadata{ + Type: model.MetricTypeUnknown, + }, + }, + { + input: writev2.Metadata{ + Type: 12414, // Unknown. + }, + expected: metadata.Metadata{ + Type: model.MetricTypeUnknown, + }, + }, + { + input: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: sym.Symbolize("help1"), + UnitRef: sym.Symbolize("unit1"), + }, + expected: metadata.Metadata{ + Type: model.MetricTypeCounter, + Help: "help1", + Unit: "unit1", + }, + }, + { + input: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_STATESET, + HelpRef: sym.Symbolize("help2"), + }, + expected: metadata.Metadata{ + Type: model.MetricTypeStateset, + Help: "help2", + }, + }, + } { + t.Run("", func(t *testing.T) { + ts := writev2.TimeSeries{Metadata: tc.input} + require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols())) + }) + } +} + +func TestToHistogram_Empty(t *testing.T) { + t.Run("v1", func(t *testing.T) { + require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "") + require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "") + }) + t.Run("v2", func(t *testing.T) { + require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "") + require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "") + }) +} + +// NOTE(bwplotka): This is technically not a valid histogram, but it represents +// important cases to test when copying or converting to/from int/float histograms. +func testIntHistogram() histogram.Histogram { + return histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + Count: 19, + Sum: 2.7, + ZeroThreshold: 1e-128, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + CustomValues: []float64{21421, 523}, + } +} + +// NOTE(bwplotka): This is technically not a valid histogram, but it represents +// important cases to test when copying or converting to/from int/float histograms. 
+func testFloatHistogram() histogram.FloatHistogram { + return histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + Count: 19, + Sum: 2.7, + ZeroThreshold: 1e-128, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []float64{1, 3, 1, 2, 1, 1}, + CustomValues: []float64{21421, 523}, + } +} + +func TestFromIntToFloatOrIntHistogram(t *testing.T) { + t.Run("v1", func(t *testing.T) { + // v1 does not support nhcb. + testIntHistWithoutNHCB := testIntHistogram() + testIntHistWithoutNHCB.CustomValues = nil + testFloatHistWithoutNHCB := testFloatHistogram() + testFloatHistWithoutNHCB.CustomValues = nil + + h := prompb.FromIntHistogram(123, &testIntHistWithoutNHCB) + require.False(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Equal(t, testIntHistWithoutNHCB, *h.ToIntHistogram()) + require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram()) + }) + t.Run("v2", func(t *testing.T) { + testIntHist := testIntHistogram() + testFloatHist := testFloatHistogram() + + h := writev2.FromIntHistogram(123, &testIntHist) + require.False(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Equal(t, testIntHist, *h.ToIntHistogram()) + require.Equal(t, testFloatHist, *h.ToFloatHistogram()) + }) +} + +func TestFromFloatToFloatHistogram(t *testing.T) { + t.Run("v1", func(t *testing.T) { + // v1 does not support nhcb. + testFloatHistWithoutNHCB := testFloatHistogram() + testFloatHistWithoutNHCB.CustomValues = nil + + h := prompb.FromFloatHistogram(123, &testFloatHistWithoutNHCB) + require.True(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Nil(t, h.ToIntHistogram()) + require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram()) + }) + t.Run("v2", func(t *testing.T) { + testFloatHist := testFloatHistogram() + + h := writev2.FromFloatHistogram(123, &testFloatHist) + require.True(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Nil(t, h.ToIntHistogram()) + require.Equal(t, testFloatHist, *h.ToFloatHistogram()) + }) +} + +func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) { + for _, tc := range []struct { + input histogram.CounterResetHint + expectedV1 prompb.Histogram_ResetHint + expectedV2 writev2.Histogram_ResetHint + }{ + { + input: histogram.UnknownCounterReset, + expectedV1: prompb.Histogram_UNKNOWN, + expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED, + }, + { + input: histogram.CounterReset, + expectedV1: prompb.Histogram_YES, + expectedV2: writev2.Histogram_RESET_HINT_YES, + }, + { + input: histogram.NotCounterReset, + expectedV1: prompb.Histogram_NO, + expectedV2: writev2.Histogram_RESET_HINT_NO, + }, + { + input: histogram.GaugeType, + expectedV1: prompb.Histogram_GAUGE, + expectedV2: writev2.Histogram_RESET_HINT_GAUGE, + }, + } { + t.Run("", func(t *testing.T) { + t.Run("v1", func(t *testing.T) { + h := testIntHistogram() + h.CounterResetHint = tc.input + got := prompb.FromIntHistogram(1337, &h) + require.Equal(t, tc.expectedV1, got.GetResetHint()) + + fh := testFloatHistogram() + fh.CounterResetHint = tc.input + got2 := prompb.FromFloatHistogram(1337, &fh) + require.Equal(t, tc.expectedV1, got2.GetResetHint()) + }) + t.Run("v2", func(t *testing.T) { + h := testIntHistogram() + h.CounterResetHint 
= tc.input + got := writev2.FromIntHistogram(1337, &h) + require.Equal(t, tc.expectedV2, got.GetResetHint()) + + fh := testFloatHistogram() + fh.CounterResetHint = tc.input + got2 := writev2.FromFloatHistogram(1337, &fh) + require.Equal(t, tc.expectedV2, got2.GetResetHint()) + }) + }) + } +} diff --git a/promql/bench_test.go b/promql/bench_test.go index fb3b6ac74b..cd6d1190ca 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" @@ -116,7 +117,7 @@ func rangeQueryCases() []benchCase { }, // Holt-Winters and long ranges. { - expr: "holt_winters(a_X[1d], 0.3, 0.3)", + expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)", }, { expr: "changes(a_X[1d])", @@ -165,6 +166,9 @@ func rangeQueryCases() []benchCase { { expr: "sum(a_X)", }, + { + expr: "avg(a_X)", + }, { expr: "sum without (l)(h_X)", }, @@ -187,6 +191,21 @@ func rangeQueryCases() []benchCase { { expr: "topk(5, a_X)", }, + { + expr: "limitk(1, a_X)", + }, + { + expr: "limitk(5, a_X)", + }, + { + expr: "limit_ratio(0.1, a_X)", + }, + { + expr: "limit_ratio(0.5, a_X)", + }, + { + expr: "limit_ratio(-0.5, a_X)", + }, // Combinations. { expr: "rate(a_X[1m]) + rate(b_X[1m])", @@ -256,7 +275,7 @@ func BenchmarkRangeQuery(b *testing.B) { MaxSamples: 50000000, Timeout: 100 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(b, opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps. @@ -347,7 +366,7 @@ func BenchmarkNativeHistograms(b *testing.B) { for _, tc := range cases { b.Run(tc.name, func(b *testing.B) { - ng := promql.NewEngine(opts) + ng := promqltest.NewTestEngineWithOpts(b, opts) for i := 0; i < b.N; i++ { qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) if err != nil { diff --git a/promql/engine.go b/promql/engine.go index a5377d1645..e10be63783 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "io" "math" "reflect" "runtime" @@ -271,6 +272,8 @@ func contextErr(err error, env string) error { // // 2) Enforcement of the maximum number of concurrent queries. type QueryTracker interface { + io.Closer + // GetMaxConcurrent returns maximum number of concurrent queries that are allowed by this tracker. GetMaxConcurrent() int @@ -313,6 +316,11 @@ type EngineOpts struct { // EnablePerStepStats if true allows for per-step stats to be computed on request. Disabled otherwise. EnablePerStepStats bool + + // EnableDelayedNameRemoval delays the removal of the __name__ label to the last step of the query evaluation. + // This is useful in certain scenarios where the __name__ label must be preserved or where applying a + // regex-matcher to the __name__ label may otherwise lead to duplicate labelset errors. + EnableDelayedNameRemoval bool } // Engine handles the lifetime of queries from beginning to end. @@ -330,6 +338,7 @@ type Engine struct { enableAtModifier bool enableNegativeOffset bool enablePerStepStats bool + enableDelayedNameRemoval bool } // NewEngine returns a new engine. 
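A hedged sketch of how a caller might wire up the engine options introduced here (the MaxSamples/Timeout values are illustrative, borrowed from bench_test.go above; EnableDelayedNameRemoval and Close are added by the hunks below):

    package main

    import (
        "time"

        "github.com/prometheus/prometheus/promql"
    )

    func main() {
        eng := promql.NewEngine(promql.EngineOpts{
            MaxSamples:               50000000,
            Timeout:                  100 * time.Second,
            EnableDelayedNameRemoval: true, // Keep __name__ until the final evaluation step.
        })
        // Close releases resources such as the active query tracker.
        defer eng.Close()
    }
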
@@ -420,9 +429,22 @@ func NewEngine(opts EngineOpts) *Engine {
 		enableAtModifier:         opts.EnableAtModifier,
 		enableNegativeOffset:     opts.EnableNegativeOffset,
 		enablePerStepStats:       opts.EnablePerStepStats,
+		enableDelayedNameRemoval: opts.EnableDelayedNameRemoval,
 	}
 }
 
+// Close closes ng.
+func (ng *Engine) Close() error {
+	if ng == nil {
+		return nil
+	}
+
+	if ng.activeQueryTracker != nil {
+		return ng.activeQueryTracker.Close()
+	}
+	return nil
+}
+
 // SetQueryLogger sets the query logger.
 func (ng *Engine) SetQueryLogger(l QueryLogger) {
 	ng.queryLoggerLock.Lock()
@@ -573,7 +595,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error {
 	return validationErr
 }
 
-// NewTestQuery: inject special behaviour into Query for testing.
+// NewTestQuery injects special behaviour into Query for testing.
 func (ng *Engine) NewTestQuery(f func(context.Context) error) Query {
 	qry := &query{
 		q: "test statement",
@@ -706,16 +728,16 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 			startTimestamp:           start,
 			endTimestamp:             start,
 			interval:                 1,
-			ctx:                      ctxInnerEval,
 			maxSamples:               ng.maxSamplesPerQuery,
 			logger:                   ng.logger,
 			lookbackDelta:            s.LookbackDelta,
 			samplesStats:             query.sampleStats,
 			noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
+			enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
 		}
 		query.sampleStats.InitStepTracking(start, start, 1)
 
-		val, warnings, err := evaluator.Eval(s.Expr)
+		val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)
 
 		evalSpanTimer.Finish()
@@ -743,9 +765,9 @@
 			// Point might have a different timestamp, force it to the evaluation
 			// timestamp as that is when we ran the evaluation.
 			if len(s.Histograms) > 0 {
-				vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start}
+				vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start, DropName: s.DropName}
 			} else {
-				vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start}
+				vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start, DropName: s.DropName}
 			}
 		}
 		return vector, warnings, nil
@@ -764,15 +786,15 @@
 		startTimestamp:           timeMilliseconds(s.Start),
 		endTimestamp:             timeMilliseconds(s.End),
 		interval:                 durationMilliseconds(s.Interval),
-		ctx:                      ctxInnerEval,
 		maxSamples:               ng.maxSamplesPerQuery,
 		logger:                   ng.logger,
 		lookbackDelta:            s.LookbackDelta,
 		samplesStats:             query.sampleStats,
 		noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
+		enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
 	}
 	query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
 
-	val, warnings, err := evaluator.Eval(s.Expr)
+	val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)
 
 	evalSpanTimer.Finish()
@@ -887,11 +909,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
 	}
 
 	if evalRange == 0 {
-		start -= durationMilliseconds(s.LookbackDelta)
+		// Reduce the start by one fewer ms than the lookback delta
+		// because we want to exclude samples that are precisely the
+		// lookback delta before the eval time. For example, with a 5m
+		// lookback delta, a sample exactly 5m old at the eval time is
+		// outside the window (t-5m, t], so selection starts at t-5m+1ms.
+ start -= durationMilliseconds(s.LookbackDelta) - 1 } else { - // For all matrix queries we want to ensure that we have (end-start) + range selected - // this way we have `range` data before the start time - start -= durationMilliseconds(evalRange) + // For all matrix queries we want to ensure that we have + // (end-start) + range selected; this way we have `range` data + // before the start time. We subtract one from the range to + // exclude samples positioned directly at the lower boundary of + // the range. + start -= durationMilliseconds(evalRange) - 1 } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) @@ -976,6 +1004,8 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { return false, nil } +// checkAndExpandSeriesSet expands expr's UnexpandedSeriesSet into expr's Series. +// If the Series field is already non-nil, it's a no-op. func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) { switch e := expr.(type) { case *parser.MatrixSelector: @@ -984,6 +1014,8 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations if e.Series != nil { return nil, nil } + span := trace.SpanFromContext(ctx) + span.AddEvent("expand start", trace.WithAttributes(attribute.String("selector", e.String()))) series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet) if e.SkipHistogramBuckets { for i := range series { @@ -991,6 +1023,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations } } e.Series = series + span.AddEvent("expand end", trace.WithAttributes(attribute.Int("num_series", len(series)))) return ws, err } return nil, nil @@ -1020,8 +1053,6 @@ func (e errWithWarnings) Error() string { return e.err.Error() } // querier and reports errors. On timeout or cancellation of its context it // terminates. type evaluator struct { - ctx context.Context - startTimestamp int64 // Start time in milliseconds. endTimestamp int64 // End time in milliseconds. interval int64 // Interval in milliseconds. @@ -1032,6 +1063,7 @@ type evaluator struct { lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 + enableDelayedNameRemoval bool } // errorf causes a panic with the input formatted into an error. @@ -1057,7 +1089,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err @@ -1069,10 +1101,13 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { +func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) - v, ws = ev.eval(expr) + v, ws = ev.eval(ctx, expr) + if ev.enableDelayedNameRemoval { + ev.cleanupMetricLabels(v) + } return v, ws, nil } @@ -1101,6 +1136,9 @@ type EvalNodeHelper struct { rightSigs map[string]Sample matchedSigs map[string]map[uint64]struct{} resultMetric map[string]labels.Labels + + // Additional options for the evaluation.
+ enableDelayedNameRemoval bool } func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { @@ -1117,7 +1155,7 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -1128,7 +1166,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) warnings.Merge(ws) matrixes[i] = val.(Matrix) @@ -1150,7 +1188,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) biggestLen = len(matrixes[i]) } } - enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)} + enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} type seriesAndTimestamp struct { Series ts int64 @@ -1180,7 +1218,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. @@ -1196,12 +1234,12 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) for si, series := range matrixes[i] { switch { case len(series.Floats) > 0 && series.Floats[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts}) + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts, DropName: series.DropName}) // Move input vectors forward so we don't have to re-scan the same // past points at the next step. matrixes[i][si].Floats = series.Floats[1:] case len(series.Histograms) > 0 && series.Histograms[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts}) + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts, DropName: series.DropName}) matrixes[i][si].Histograms = series.Histograms[1:] default: continue @@ -1240,15 +1278,15 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // If this could be an instant query, shortcut so as not to change sort order. 
if ev.endTimestamp == ev.startTimestamp { - if result.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && result.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } mat := make(Matrix, len(result)) for i, s := range result { if s.H == nil { - mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}, DropName: s.DropName} } else { - mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}, DropName: s.DropName} } } ev.currentSamples = originalNumSamples + mat.TotalSamples() @@ -1266,7 +1304,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } ss.ts = ts } else { - ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} + ss = seriesAndTimestamp{Series{Metric: sample.Metric, DropName: sample.DropName}, ts} } addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) seriess[h] = ss @@ -1290,7 +1328,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) return mat, warnings } -func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { // Keep a copy of the original point slice so that it can be returned to the pool. origMatrix := slices.Clone(inputMatrix) defer func() { @@ -1302,7 +1340,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping var warnings annotations.Annotations - enh := &EvalNodeHelper{} + enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval} tempNumSamples := ev.currentSamples // Create a mapping from input series to output groups. @@ -1318,7 +1356,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping index, ok := groupToResultIndex[groupingKey] // Add a new group if it doesn't exist. if !ok { - if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK { + if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK && aggExpr.Op != parser.LIMITK && aggExpr.Op != parser.LIMIT_RATIO { m := generateGroupingLabels(enh, series.Metric, aggExpr.Without, sortedGrouping) result = append(result, Series{Metric: m}) } @@ -1331,9 +1369,10 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping groups := make([]groupedAggregation, groupCount) var k int + var ratio float64 var seriess map[uint64]Series switch aggExpr.Op { - case parser.TOPK, parser.BOTTOMK: + case parser.TOPK, parser.BOTTOMK, parser.LIMITK: if !convertibleToInt64(param) { ev.errorf("Scalar value %v overflows int64", param) } @@ -1345,6 +1384,23 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping return nil, warnings } seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. 
+ case parser.LIMIT_RATIO: + if math.IsNaN(param) { + ev.errorf("Ratio value %v is NaN", param) + } + switch { + case param == 0: + return nil, warnings + case param < -1.0: + ratio = -1.0 + warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) + case param > 1.0: + ratio = 1.0 + warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) + default: + ratio = param + } + seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. case parser.QUANTILE: if math.IsNaN(param) || param < 0 || param > 1 { warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange())) } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. @@ -1362,11 +1418,12 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping enh.Ts = ts var ws annotations.Annotations switch aggExpr.Op { - case parser.TOPK, parser.BOTTOMK: - result, ws = ev.aggregationK(aggExpr, k, inputMatrix, seriesToResult, groups, enh, seriess) + case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: + result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess) // If this could be an instant query, shortcut so as not to change sort order. if ev.endTimestamp == ev.startTimestamp { - return result, ws + warnings.Merge(ws) + return result, warnings } default: ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh) @@ -1381,7 +1438,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping // Assemble the output matrix. By the time we get here we know we don't have too many samples. switch aggExpr.Op { - case parser.TOPK, parser.BOTTOMK: + case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: result = make(Matrix, 0, len(seriess)) for _, ss := range seriess { result = append(result, ss) @@ -1400,13 +1457,77 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping return result, warnings } +// evalVectorSelector generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from vs. +// vs.Series has to be expanded before calling this method. +// For every series iterator in vs.Series, the method iterates in ev.interval-sized steps from ev.startTimestamp until and including ev.endTimestamp, +// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series. +// All of the generated Series are collected into a Matrix, which is returned.
+func (ev *evaluator) evalVectorSelector(ctx context.Context, vs *parser.VectorSelector) Matrix { + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + + mat := make(Matrix, 0, len(vs.Series)) + var prevSS *Series + it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator + for _, s := range vs.Series { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) + ss := Series{ + Metric: s.Labels(), + } + + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { + step++ + _, f, h, ok := ev.vectorSelectorSingle(it, vs, ts) + if !ok { + continue + } + + if h == nil { + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtStep(step, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Floats == nil { + ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + point := HPoint{H: h, T: ts} + histSize := point.size() + ev.currentSamples += histSize + ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize)) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Histograms == nil { + ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) + } + ss.Histograms = append(ss.Histograms, point) + } + } + + if len(ss.Floats)+len(ss.Histograms) > 0 { + mat = append(mat, ss) + prevSS = &mat[len(mat)-1] + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + return mat +} + // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. -func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { +func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() - val, ws := ev.eval(subq) + val, ws := ev.eval(ctx, subq) // But do incorporate the peak from the subquery samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats @@ -1433,18 +1554,20 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { +func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. // Thus, we check for timeout/cancellation here. - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 // Create a new span to help investigate inner evaluation performances. 
- ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) - ev.ctx = ctxWithSpan + ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) defer span.End() + if ss, ok := expr.(interface{ ShortString() string }); ok { + span.SetAttributes(attribute.String("operation", ss.ShortString())) + } switch e := expr.(type) { case *parser.AggregateExpr: @@ -1465,7 +1588,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio sortedGrouping = append(sortedGrouping, valueLabel.Val) slices.Sort(sortedGrouping) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) }, e.Expr) } @@ -1475,16 +1598,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // param is the number k for topk/bottomk, or q for quantile. var fParam float64 if param != nil { - val, ws := ev.eval(param) + val, ws := ev.eval(ctx, param) warnings.Merge(ws) fParam = val.(Matrix)[0].Floats[0].F } // Now fetch the data to be aggregated. - val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) warnings.Merge(ws) inputMatrix := val.(Matrix) - result, ws := ev.rangeEvalAgg(e, sortedGrouping, inputMatrix, fParam) + result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam) warnings.Merge(ws) ev.currentSamples = originalNumSamples + result.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1502,7 +1625,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) + return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e) } } @@ -1526,7 +1649,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio matrixArgIndex = i matrixArg = true // Replacing parser.SubqueryExpr with parser.MatrixSelector. - val, totalSamples, ws := ev.evalSubquery(subq) + val, totalSamples, ws := ev.evalSubquery(ctx, subq) e.Args[i] = val warnings.Merge(ws) defer func() { @@ -1541,14 +1664,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // Special handling for functions that work on series not samples. switch e.Func.Name { case "label_replace": - return ev.evalLabelReplace(e.Args) + return ev.evalLabelReplace(ctx, e.Args) case "label_join": - return ev.evalLabelJoin(e.Args) + return ev.evalLabelJoin(ctx, e.Args) } if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, annos := call(v, e.Args, enh) return vec, warnings.Merge(annos) }, e.Args...) 
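Evaluation now threads context.Context through every eval call instead of caching it on the evaluator, and each tracing span is tagged with a compact operation name when the node offers one via an optional-interface probe. A self-contained sketch of that probe pattern; the types here are stand-ins, not the engine's:

package main

import "fmt"

// shortStringer mirrors the anonymous interface asserted in eval() above.
type shortStringer interface{ ShortString() string }

// vectorSelector is a stand-in node type that supports short names.
type vectorSelector struct{ name string }

func (v vectorSelector) ShortString() string { return v.name }

// operationLabel returns the node's compact label when available and a
// generic fallback otherwise.
func operationLabel(node any) string {
	if ss, ok := node.(shortStringer); ok {
		return ss.ShortString()
	}
	return "expr"
}

func main() {
	fmt.Println(operationLabel(vectorSelector{name: "up"})) // up
	fmt.Println(operationLabel(42))                         // expr
}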
@@ -1560,7 +1683,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio otherInArgs := make([]Vector, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] @@ -1574,7 +1697,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio sel := arg.(*parser.MatrixSelector) selVS := sel.VectorSelector.(*parser.VectorSelector) - ws, err := checkAndExpandSeriesSet(ev.ctx, sel) + ws, err := checkAndExpandSeriesSet(ctx, sel) warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) @@ -1592,12 +1715,19 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio var prevSS *Series inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix - enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} + enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) var chkIter chunkenc.Iterator + + // The last_over_time function acts like offset; thus, it + // should keep the metric name. For all the other range + // vector functions, the only change needed is to drop the + // metric name in the output. + dropName := e.Func.Name != "last_over_time" + for i, s := range selVS.Series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } ev.currentSamples -= len(floats) + totalHPointSize(histograms) @@ -1610,15 +1740,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio chkIter = s.Iterator(chkIter) it.Reset(chkIter) metric := selVS.Series[i].Labels() - // The last_over_time function acts like offset; thus, it - // should keep the metric name. For all the other range - // vector functions, the only change needed is to drop the - // metric name in the output. 
- if e.Func.Name != "last_over_time" { + if !ev.enableDelayedNameRemoval && dropName { metric = metric.DropMetricName() } ss := Series{ - Metric: metric, + Metric: metric, + DropName: dropName, } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1681,9 +1808,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio ev.samplesStats.UpdatePeak(ev.currentSamples) if e.Func.Name == "rate" || e.Func.Name == "increase" { - samples := inMatrix[0] - metricName := samples.Metric.Get(labels.MetricName) - if metricName != "" && len(samples.Floats) > 0 && + metricName := inMatrix[0].Metric.Get(labels.MetricName) + if metricName != "" && len(ss.Floats) > 0 && !strings.HasSuffix(metricName, "_total") && !strings.HasSuffix(metricName, "_sum") && !strings.HasSuffix(metricName, "_count") && @@ -1733,32 +1859,38 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio return Matrix{ Series{ - Metric: createLabelsForAbsentFunction(e.Args[0]), - Floats: newp, + Metric: createLabelsForAbsentFunction(e.Args[0]), + Floats: newp, + DropName: dropName, }, }, warnings } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } - return mat, warnings case *parser.ParenExpr: - return ev.eval(e.Expr) + return ev.eval(ctx, e.Expr) case *parser.UnaryExpr: - val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) mat := val.(Matrix) if e.Op == parser.SUB { for i := range mat { - mat[i].Metric = mat[i].Metric.DropMetricName() + if !ev.enableDelayedNameRemoval { + mat[i].Metric = mat[i].Metric.DropMetricName() + } + mat[i].DropName = true for j := range mat[i].Floats { mat[i].Floats[j].F = -mat[i].Floats[j].F } + for j := range mat[i].Histograms { + mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1) + } } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } } @@ -1767,7 +1899,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) @@ -1780,120 +1912,73 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh 
[][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + span.SetAttributes(attribute.Float64("value", e.Val)) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: + span.SetAttributes(attribute.String("value", e.Val)) return String{V: e.Val, T: ev.startTimestamp}, nil case *parser.VectorSelector: - ws, err := checkAndExpandSeriesSet(ev.ctx, e) + ws, err := checkAndExpandSeriesSet(ctx, e) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - mat := make(Matrix, 0, len(e.Series)) - var prevSS *Series - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range e.Series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { - ev.error(err) - } - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - ss := Series{ - Metric: e.Series[i].Labels(), - } - - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { - step++ - _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) - if ok { - if h == nil { - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtStep(step, 1) - if ev.currentSamples > 
ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Floats == nil { - ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) - } - ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) - } else { - point := HPoint{H: h, T: ts} - histSize := point.size() - ev.currentSamples += histSize - ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize)) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Histograms == nil { - ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) - } - ss.Histograms = append(ss.Histograms, point) - } - } - } - - if len(ss.Floats)+len(ss.Histograms) > 0 { - mat = append(mat, ss) - prevSS = &mat[len(mat)-1] - } - } - ev.samplesStats.UpdatePeak(ev.currentSamples) + mat := ev.evalVectorSelector(ctx, e) return mat, ws case *parser.MatrixSelector: if ev.startTimestamp != ev.endTimestamp { panic(errors.New("cannot do range evaluation of matrix selector")) } - return ev.matrixSelector(e) + return ev.matrixSelector(ctx, e) case *parser.SubqueryExpr: offsetMillis := durationMilliseconds(e.Offset) rangeMillis := durationMilliseconds(e.Range) newEv := &evaluator{ endTimestamp: ev.endTimestamp - offsetMillis, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, } if e.Step != 0 { @@ -1916,7 +2001,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio setOffsetForAtModifier(newEv.startTimestamp, e.Expr) } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples) @@ -1924,22 +2009,22 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.StepInvariantExpr: switch ce := e.Expr.(type) { case *parser.StringLiteral, *parser.NumberLiteral: - return ev.eval(ce) + return ev.eval(ctx, ce) } newEv := &evaluator{ startTimestamp: ev.startTimestamp, endTimestamp: ev.startTimestamp, // Always a single evaluation. 
interval: ev.interval, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -2015,8 +2100,8 @@ func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) { return getFPointSlice(numSteps) } -func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { - ws, err := checkAndExpandSeriesSet(ev.ctx, vs) +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Context, vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { + ws, err := checkAndExpandSeriesSet(ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2024,10 +2109,10 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) for i, s := range vs.Series { it := s.Iterator(nil) - seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -2086,7 +2171,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no if valueType == chunkenc.ValNone || t > refTime { var ok bool t, v, h, ok = it.PeekPrev() - if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { + if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } } @@ -2163,7 +2248,7 @@ func putMatrixSelectorHPointSlice(p []HPoint) { } // matrixSelector evaluates a *parser.MatrixSelector expression. 
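Several comparisons in the selector code below flip from inclusive to exclusive so that a range selector covers the left-open interval (mint, maxt]: a sample sitting exactly one lookback delta or one range width before the evaluation time is no longer included. A toy illustration of the changed predicate, with assumed millisecond timestamps:

package main

import "fmt"

// inWindow reflects the new boundary handling: a timestamp equal to mint is
// excluded, while one equal to maxt is still included.
func inWindow(t, mint, maxt int64) bool {
	return t > mint && t <= maxt
}

func main() {
	const mint, maxt = 60000, 120000 // a 1m window ending at t=120s
	for _, t := range []int64{60000, 60001, 120000} {
		fmt.Printf("t=%d in (mint, maxt]: %v\n", t, inWindow(t, mint, maxt))
	}
}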
-func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) { +func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSelector) (Matrix, annotations.Annotations) { var ( vs = node.VectorSelector.(*parser.VectorSelector) @@ -2174,7 +2259,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota it = storage.NewBuffer(durationMilliseconds(node.Range)) ) - ws, err := checkAndExpandSeriesSet(ev.ctx, node) + ws, err := checkAndExpandSeriesSet(ctx, node) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2182,7 +2267,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } chkIter = s.Iterator(chkIter) @@ -2220,20 +2305,20 @@ func (ev *evaluator) matrixIterSlice( mintFloats, mintHistograms := mint, mint // First floats... - if len(floats) > 0 && floats[len(floats)-1].T >= mint { + if len(floats) > 0 && floats[len(floats)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T <= mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mintFloats = floats[len(floats)-1].T + 1 + mintFloats = floats[len(floats)-1].T } else { ev.currentSamples -= len(floats) if floats != nil { @@ -2242,14 +2327,14 @@ func (ev *evaluator) matrixIterSlice( } // ...then the same for histograms. TODO(beorn7): Use generics? - if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + if len(histograms) > 0 && histograms[len(histograms)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T <= mint; drop++ { } // Rotate the buffer around the drop index so that points before mint can be // reused to store new histograms. @@ -2260,7 +2345,7 @@ func (ev *evaluator) matrixIterSlice( histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. - mintHistograms = histograms[len(histograms)-1].T + 1 + mintHistograms = histograms[len(histograms)-1].T } else { ev.currentSamples -= totalHPointSize(histograms) if histograms != nil { @@ -2284,7 +2369,7 @@ loop: case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintHistograms { + if t > mintHistograms { if histograms == nil { histograms = getMatrixSelectorHPoints() } @@ -2310,7 +2395,7 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. 
- if t >= mintFloats { + if t > mintFloats { ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -2337,6 +2422,11 @@ loop: } else { histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } + if histograms[n].H == nil { + // Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy. + // Not an issue in the loop above since that uses an intermediate buffer. + histograms[n].H = &histogram.FloatHistogram{} + } histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H) if value.IsStaleNaN(histograms[n].H.Sum) { histograms = histograms[:n] @@ -2529,7 +2619,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) - if returnBool { + if !ev.enableDelayedNameRemoval && returnBool { metric = metric.DropMetricName() } insertedSigs, exists := matchedSigs[sig] @@ -2554,9 +2644,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } enh.Out = append(enh.Out, Sample{ - Metric: metric, - F: floatValue, - H: histogramValue, + Metric: metric, + F: floatValue, + H: histogramValue, + DropName: returnBool, }) } return enh.Out, lastErr @@ -2656,7 +2747,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lhsSample.F = float lhsSample.H = histogram if shouldDropMetricName(op) || returnBool { - lhsSample.Metric = lhsSample.Metric.DropMetricName() + if !ev.enableDelayedNameRemoval { + lhsSample.Metric = lhsSample.Metric.DropMetricName() + } + lhsSample.DropName = true } enh.Out = append(enh.Out, lhsSample) } @@ -2754,14 +2848,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram } type groupedAggregation struct { - seen bool // Was this output groups seen in the input at this timestamp. - hasFloat bool // Has at least 1 float64 sample aggregated. - hasHistogram bool // Has at least 1 histogram sample aggregated. floatValue float64 histogramValue *histogram.FloatHistogram - floatMean float64 // Mean, or "compensating value" for Kahan summation. - groupCount int + floatMean float64 + floatKahanC float64 // "Compensating value" for Kahan summation. + groupCount float64 heap vectorByValueHeap + + // All bools together for better packing within the struct. + seen bool // Was this output group seen in the input at this timestamp. + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets. + groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached k elements on every group. + incrementalMean bool // True after reverting to incremental calculation of the mean value. } // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix. @@ -2785,15 +2885,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // Initialize this group if it's the first time we've seen it.
if !group.seen { *group = groupedAggregation{ - seen: true, - floatValue: f, - groupCount: 1, + seen: true, + floatValue: f, + floatMean: f, + incompatibleHistograms: false, + groupCount: 1, } switch op { - case parser.AVG: - group.floatMean = f - fallthrough - case parser.SUM: + case parser.AVG, parser.SUM: if h == nil { group.hasFloat = true } else { @@ -2801,7 +2900,6 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: - group.floatMean = f group.floatValue = 0 case parser.QUANTILE: group.heap = make(vectorByValueHeap, 1) @@ -2812,6 +2910,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix continue } + if group.incompatibleHistograms { + continue + } + switch op { case parser.SUM: if h != nil { @@ -2820,6 +2922,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix _, err := group.histogramValue.Add(h) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true } } // Otherwise the aggregation contained floats @@ -2827,7 +2930,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. } else { group.hasFloat = true - group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean) + group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC) } case parser.AVG: @@ -2835,15 +2938,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h != nil { group.hasHistogram = true if group.histogramValue != nil { - left := h.Copy().Div(float64(group.groupCount)) - right := group.histogramValue.Copy().Div(float64(group.groupCount)) + left := h.Copy().Div(group.groupCount) + right := group.histogramValue.Copy().Div(group.groupCount) toAdd, err := left.Sub(right) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue } _, err = group.histogramValue.Add(toAdd) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue } } // Otherwise the aggregation contained floats @@ -2851,6 +2958,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. } else { group.hasFloat = true + if !group.incrementalMean { + newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC) + if !math.IsInf(newV, 0) { + // The sum doesn't overflow, so we propagate it to the + // group struct and continue with the regular + // calculation of the mean value. + group.floatValue, group.floatKahanC = newV, newC + break + } + // If we are here, we know that the sum _would_ overflow. So + // instead of continue to sum up, we revert to incremental + // calculation of the mean value from here on. + group.incrementalMean = true + group.floatMean = group.floatValue / (group.groupCount - 1) + group.floatKahanC /= group.groupCount - 1 + } if math.IsInf(group.floatMean, 0) { if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) { // The `floatMean` and `s.F` values are `Inf` of the same sign. 
They @@ -2868,8 +2991,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix break } } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount) + currentMean := group.floatMean + group.floatKahanC + group.floatMean, group.floatKahanC = kahanSumInc( + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + f/group.groupCount-currentMean/group.groupCount, + group.floatMean, + group.floatKahanC, + ) } case parser.GROUP: @@ -2892,7 +3020,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h == nil { // Ignore native histograms. group.groupCount++ delta := f - group.floatMean - group.floatMean += delta / float64(group.groupCount) + group.floatMean += delta / group.groupCount group.floatValue += delta * (f - group.floatMean) } @@ -2918,20 +3046,25 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: aggr.histogramValue = aggr.histogramValue.Compact(0) - } else { - aggr.floatValue = aggr.floatMean + case aggr.incrementalMean: + aggr.floatValue = aggr.floatMean + aggr.floatKahanC + default: + aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount } case parser.COUNT: - aggr.floatValue = float64(aggr.groupCount) + aggr.floatValue = aggr.groupCount case parser.STDVAR: - aggr.floatValue /= float64(aggr.groupCount) + aggr.floatValue /= aggr.groupCount case parser.STDDEV: - aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount) case parser.QUANTILE: aggr.floatValue = quantile(q, aggr.heap) @@ -2942,10 +3075,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: aggr.histogramValue.Compact(0) - } else { - aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term. + default: + aggr.floatValue += aggr.floatKahanC } default: // For other aggregations, we already have the right value. @@ -2953,39 +3089,55 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix ss := &outputMatrix[ri] addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps) + ss.DropName = inputMatrix[ri].DropName } return annos } -// aggregationK evaluates topk or bottomk at one timestep on inputMatrix. +// aggregationK evaluates topk, bottomk, limitk, or limit_ratio at one timestep on inputMatrix. // Output that has the same labels as the input, but just k of them per group. // seriesToResult maps inputMatrix indexes to groups indexes. -// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk. +// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio. // For a range query, aggregates output in the seriess map. 
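For topk and bottomk each group holds a heap capped at k entries; limitk reuses the same heap but simply stops pushing once it contains k elements, which is what makes the seriesLoop short-circuit below possible. A minimal, self-contained illustration of a k-capped heap; it is simplified to plain floats, whereas the engine's heap orders Samples:

package main

import (
	"container/heap"
	"fmt"
)

// minHeap is a float64 min-heap; the smallest element sits at index 0.
type minHeap []float64

func (h minHeap) Len() int           { return len(h) }
func (h minHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x any)        { *h = append(*h, x.(float64)) }
func (h *minHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// topK keeps at most k values: once the heap is full, a new value replaces
// the current minimum only if it is larger, retaining the k largest overall.
func topK(k int, values []float64) []float64 {
	h := make(minHeap, 0, k)
	for _, v := range values {
		switch {
		case h.Len() < k:
			heap.Push(&h, v)
		case v > h[0]:
			h[0] = v
			heap.Fix(&h, 0)
		}
	}
	return h
}

func main() {
	fmt.Println(topK(3, []float64{5, 1, 9, 3, 7, 2})) // [5 7 9], heap order
}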
-func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { +func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { op := e.Op var s Sample var annos annotations.Annotations + // Used to short-cut the loop for LIMITK if we already collected k elements for every group. + groupsRemaining := len(groups) for i := range groups { groups[i].seen = false } +seriesLoop: for si := range inputMatrix { f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) if !ok { continue } - s = Sample{Metric: inputMatrix[si].Metric, F: f} + s = Sample{Metric: inputMatrix[si].Metric, F: f, DropName: inputMatrix[si].DropName} group := &groups[seriesToResult[si]] // Initialize this group if it's the first time we've seen it. if !group.seen { - *group = groupedAggregation{ - seen: true, - heap: make(vectorByValueHeap, 1, k), + // LIMIT_RATIO is a special case, as we may not add this very sample to the heap, + // while we also don't know its final size. + if op == parser.LIMIT_RATIO { + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 0), + } + if ratiosampler.AddRatioSample(r, &s) { + heap.Push(&group.heap, &s) + } + } else { + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 1, k), + } + group.heap[0] = s } - group.heap[0] = s continue } @@ -3016,6 +3168,26 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma } } + case parser.LIMITK: + if len(group.heap) < k { + heap.Push(&group.heap, &s) + } + // LIMITK optimization: break early once we've added k elements to _every_ group, + // especially useful for large timeseries where the user is exploring labels via e.g. + // limitk(10, my_metric) + if !group.groupAggrComplete && len(group.heap) == k { + group.groupAggrComplete = true + groupsRemaining-- + if groupsRemaining == 0 { + break seriesLoop + } + } + + case parser.LIMIT_RATIO: + if ratiosampler.AddRatioSample(r, &s) { + heap.Push(&group.heap, &s) + } + default: panic(fmt.Errorf("expected aggregation operator but got %q", op)) } @@ -3028,16 +3200,16 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma mat = make(Matrix, 0, len(groups)) } - add := func(lbls labels.Labels, f float64) { + add := func(lbls labels.Labels, f float64, dropName bool) { // If this could be an instant query, add directly to the matrix so the result is in consistent order. if ev.endTimestamp == ev.startTimestamp { - mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}}) + mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName}) } else { // Otherwise the results are added into seriess elements.
hash := lbls.Hash() ss, ok := seriess[hash] if !ok { - ss = Series{Metric: lbls} + ss = Series{Metric: lbls, DropName: dropName} } addToSeries(&ss, enh.Ts, f, nil, numSteps) seriess[hash] = ss @@ -3054,7 +3226,7 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma sort.Sort(sort.Reverse(aggr.heap)) } for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.DropName) } case parser.BOTTOMK: @@ -3063,7 +3235,12 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap))) } for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.DropName) + } + + case parser.LIMITK, parser.LIMIT_RATIO: + for _, v := range aggr.heap { + add(v.Metric, v.F, v.DropName) } } } @@ -3071,7 +3248,7 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma return mat, annos } -// aggregationK evaluates count_values on vec. +// aggregationCountValues evaluates count_values on vec. // Outputs as many series per group as there are values in the input. func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) { type groupCount struct { @@ -3115,6 +3292,30 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping [] return enh.Out, nil } +func (ev *evaluator) cleanupMetricLabels(v parser.Value) { + if v.Type() == parser.ValueTypeMatrix { + mat := v.(Matrix) + for i := range mat { + if mat[i].DropName { + mat[i].Metric = mat[i].Metric.DropMetricName() + } + } + if mat.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } else if v.Type() == parser.ValueTypeVector { + vec := v.(Vector) + for i := range vec { + if vec[i].DropName { + vec[i].Metric = vec[i].Metric.DropMetricName() + } + } + if vec.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } +} + func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) { if h == nil { if ss.Floats == nil { @@ -3390,6 +3591,12 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) { // required for correctness. func detectHistogramStatsDecoding(expr parser.Expr) { parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + if n, ok := node.(*parser.BinaryExpr); ok { + detectHistogramStatsDecoding(n.LHS) + detectHistogramStatsDecoding(n.RHS) + return fmt.Errorf("stop") + } + n, ok := (node).(*parser.VectorSelector) if !ok { return nil } @@ -3419,6 +3626,56 @@ func makeInt64Pointer(val int64) *int64 { return valp } +// RatioSampler allows unit-testing (previously: Randomizer). +type RatioSampler interface { + // Returns this sample's "offset" in [0.0, 1.0] + sampleOffset(ts int64, sample *Sample) float64 + AddRatioSample(r float64, sample *Sample) bool +} + +// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic" +// value in [0.0, 1.0].
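HashRatioSampler below is what makes limit_ratio deterministic: every series hashes to a fixed offset in [0.0, 1.0], a positive ratio keeps offsets below the ratio and a negative ratio keeps the complementary upper part, so limit_ratio(r, m) and limit_ratio(-(1-r), m) partition the input exactly. A stand-alone sketch of the same rule; the FNV hash over a label string stands in for labels.Labels.Hash():

package main

import (
	"fmt"
	"hash/fnv"
	"math"
)

// offset deterministically maps a series identity to [0.0, 1.0).
func offset(series string) float64 {
	h := fnv.New64a()
	h.Write([]byte(series))
	return float64(h.Sum64()) / float64(math.MaxUint64)
}

// keep mirrors AddRatioSample: a ratio >= 0 keeps offsets below it, a
// negative ratio keeps the complement of the corresponding positive case.
func keep(ratio float64, series string) bool {
	o := offset(series)
	if ratio >= 0 {
		return o < ratio
	}
	return o >= 1.0+ratio
}

func main() {
	for _, s := range []string{`up{job="a"}`, `up{job="b"}`, `up{job="c"}`} {
		fmt.Printf("%s kept=%v complement=%v\n", s, keep(0.2, s), keep(-0.8, s))
	}
}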
+type HashRatioSampler struct{} + +var ratiosampler RatioSampler = NewHashRatioSampler() + +func NewHashRatioSampler() *HashRatioSampler { + return &HashRatioSampler{} +} + +func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 { + const ( + float64MaxUint64 = float64(math.MaxUint64) + ) + return float64(sample.Metric.Hash()) / float64MaxUint64 +} + +func (s *HashRatioSampler) AddRatioSample(ratioLimit float64, sample *Sample) bool { + // If ratioLimit >= 0: add sample if sampleOffset is less than ratioLimit + // + // 0.0 ratioLimit 1.0 + // [---------|--------------------------] + // [#########...........................] + // + // e.g.: + // sampleOffset==0.3 && ratioLimit==0.4 + // 0.3 < 0.4 ? --> add sample + // + // Else if ratioLimit < 0: add sample if sampleOffset falls in the "complement" of the ratioLimit>=0 case + // (loosely similar behavior to negative array index in other programming languages) + // + // 0.0 1+ratioLimit 1.0 + // [---------|--------------------------] + // [.........###########################] + // + // e.g.: + // sampleOffset==0.3 && ratioLimit==-0.6 + // 0.3 >= 0.4 ? --> don't add sample + sampleOffset := s.sampleOffset(sample.T, sample) + return (ratioLimit >= 0 && sampleOffset < ratioLimit) || + (ratioLimit < 0 && sampleOffset >= (1.0+ratioLimit)) +} + type histogramStatsSeries struct { storage.Series } diff --git a/promql/engine_test.go b/promql/engine_test.go index b144c11747..e4171eb5bd 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,16 +17,14 @@ import ( "context" "errors" "fmt" - "math" - "os" "sort" "strconv" + "strings" "sync" "testing" "time" "github.com/stretchr/testify/require" - "go.uber.org/goleak" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -49,20 +47,15 @@ const ( ) func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) + // Enable experimental functions testing. + parser.EnableExperimentalFunctions = true + testutil.TolerantVerifyLeak(m) } func TestQueryConcurrency(t *testing.T) { maxConcurrency := 10 - dir, err := os.MkdirTemp("", "test_concurrency") - require.NoError(t, err) - defer os.RemoveAll(dir) - queryTracker := promql.NewActiveQueryTracker(dir, maxConcurrency, nil) - t.Cleanup(func() { - require.NoError(t, queryTracker.Close()) - }) - + queryTracker := promql.NewActiveQueryTracker(t.TempDir(), maxConcurrency, nil) opts := promql.EngineOpts{ Logger: nil, Reg: nil, @@ -70,15 +63,17 @@ func TestQueryConcurrency(t *testing.T) { Timeout: 100 * time.Second, ActiveQueryTracker: queryTracker, } + engine := promqltest.NewTestEngineWithOpts(t, opts) - engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) - defer cancelCtx() + t.Cleanup(cancelCtx) block := make(chan struct{}) processing := make(chan struct{}) done := make(chan int) - defer close(done) + t.Cleanup(func() { + close(done) + }) f := func(context.Context) error { select { @@ -163,7 +158,7 @@ func TestQueryTimeout(t *testing.T) { MaxSamples: 10, Timeout: 5 * time.Millisecond, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -188,7 +183,7 @@ func TestQueryCancel(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -236,11 +231,11 @@ func (q *errQuerier)
Select(context.Context, bool, *storage.SelectHints, ...*lab return errSeriesSet{err: q.err} } -func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (*errQuerier) Close() error { return nil } @@ -262,7 +257,7 @@ func TestQueryError(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) errStorage := promql.ErrStorage{errors.New("storage error")} queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &errQuerier{err: errStorage}, nil @@ -327,276 +322,276 @@ func TestSelectHintsSetCorrectly(t *testing.T) { { query: "foo", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000}, + {Start: 5001, End: 10000}, }, }, { query: "foo @ 15", start: 10000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000}, + {Start: 10001, End: 15000}, }, }, { query: "foo @ 1", start: 10000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000}, + {Start: -3999, End: 1000}, }, }, { query: "foo[2m]", start: 200000, expected: []*storage.SelectHints{ - {Start: 80000, End: 200000, Range: 120000}, + {Start: 80001, End: 200000, Range: 120000}, }, }, { query: "foo[2m] @ 180", start: 200000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 300", start: 200000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000}, + {Start: 180001, End: 300000, Range: 120000}, }, }, { query: "foo[2m] @ 60", start: 200000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000}, + {Start: -59999, End: 60000, Range: 120000}, }, }, { query: "foo[2m] offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 200 offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: -40000, End: 80000, Range: 120000}, + {Start: -39999, End: 80000, Range: 120000}, }, }, { query: "foo[2m:1s]", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Step: 1000}, + {Start: 175001, End: 300000, Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, expected: 
[]*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 165000, End: 290000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 290000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 155000, End: 280000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 280000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "foo", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 5000, End: 20000, Step: 1000}, + {Start: 5001, End: 20000, Step: 1000}, }, }, { query: "foo @ 15", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000, Step: 1000}, + {Start: 10001, End: 15000, Step: 1000}, }, }, { query: "foo @ 1", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000, Step: 1000}, + {Start: -3999, End: 1000, Step: 1000}, }, }, { query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 180000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 180001, End: 300000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, + {Start: -59999, End: 60000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m])", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 80001, End: 500000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 380000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "rate", Step: 1000}, + 
{Start: 175001, End: 500000, Func: "rate", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 500000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 490000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 480000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. 
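One pattern in the updated expectations above is worth spelling out: every hint Start moved forward by exactly one millisecond (5000 to 5001, 80000 to 80001, and so on). That is consistent with range and lookback selection becoming left-open, i.e. selecting from (start, end] rather than [start, end], so a sample sitting exactly on the old lower boundary no longer qualifies. A hypothetical helper makes the convention explicit; the name and signature are illustrative, not engine API.

```go
// hintStart illustrates the convention the updated expectations imply:
// with a left-open window (start, end], the first selectable millisecond
// is one past the closed boundary the old expectations used.
func hintStart(evalTimeMs, windowMs int64) int64 {
	return evalTimeMs - windowMs + 1
}

// hintStart(10000, 5000) == 5001, eval at 10s with the test's 5s lookback:
//   {Start: 5001, End: 10000}
// hintStart(200000, 120000) == 80001, a 2m range selector at t=200s:
//   {Start: 80001, End: 200000, Range: 120000}
```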
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "sum by (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, }, }, { query: "sum without (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, }, }, { query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, expected: []*storage.SelectHints{ - {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, + {Start: 9001, End: 10000, Func: "avg_over_time", Range: 1000}, }, }, { query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, + {Start: 5001, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, }, }, { query: "(max by (dim1) (foo))[5s:1s]", start: 10000, expected: []*storage.SelectHints{ - {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, + {Start: 1, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, }, }, { query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, expected: []*storage.SelectHints{ - {Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000}, - {Start: 95000, End: 120000, Func: "max", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: "sum", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: "max", By: true, Step: 5000}, }, }, { query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 48001, End: 50000, Step: 1000, Func: "rate", Range: 2000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - 
{Start: 95000, End: 500000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 655001, End: 780000, Step: 1000, Func: "rate"}, }, }, { // Hints are based on the inner most subquery timestamp. query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, expected: []*storage.SelectHints{ - {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: -149999, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, { // Hints are based on the inner most subquery timestamp. query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, expected: []*storage.SelectHints{ - {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: 2800001, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, } { t.Run(tc.query, func(t *testing.T) { - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) hintsRecorder := &noopHintRecordingQueryable{} var ( @@ -627,7 +622,7 @@ func TestEngineShutdown(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) block := make(chan struct{}) @@ -763,7 +758,7 @@ load 10s t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) { var err error var qry promql.Query - engine := newTestEngine() + engine := newTestEngine(t) if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) } else { @@ -941,22 +936,20 @@ load 10s }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 10, - TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples - // as if we run a query on 00 looking back 60 seconds we will return 7 samples; - // see next test). + TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 24, }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 11, TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + // max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples. 
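The window widths in the two cases above shifted from 59s/60s to 60s/61s for the same left-open reason: a sample sitting exactly at eval_time minus range no longer counts, so catching seven 10s-spaced samples at an aligned evaluation time now takes a 61-second window. A small sketch of the counting, assuming the fixture's samples at 0s, 10s, ..., 200s:

```go
// samplesInWindow counts the fixture's 10s-spaced samples falling in the
// left-open window (evalS-rangeS, evalS].
func samplesInWindow(evalS, rangeS, stepS int) int {
	n := 0
	for ts := 0; ts <= evalS; ts += stepS {
		if ts > evalS-rangeS {
			n++
		}
	}
	return n
}

// samplesInWindow(200, 60, 10) == 6: the [60s] window at t=200s excludes t=140s.
// samplesInWindow(200, 61, 10) == 7: only [61s] still reaches t=140s, which is
// why the subquery steps at 190s and 200s each contribute one extra sample
// (TotalSamples 26 == 6*4 + 2).
```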
TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 26, }, @@ -965,10 +958,9 @@ load 10s Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 78, - TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + TotalSamples: 312, // (1 histogram (size 13) / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 338, + 201000: 312, }, }, { @@ -1305,7 +1297,7 @@ load 10s for _, c := range cases { t.Run(c.Query, func(t *testing.T) { opts := promql.NewPrometheusQueryOpts(true, 0) - engine := promqltest.NewTestEngine(true, 0, promqltest.DefaultMaxSamplesPerQuery) + engine := promqltest.NewTestEngine(t, true, 0, promqltest.DefaultMaxSamplesPerQuery) runQuery := func(expErr error) *stats.Statistics { var err error @@ -1332,7 +1324,7 @@ load 10s if c.SkipMaxCheck { return } - engine = promqltest.NewTestEngine(true, 0, stats.Samples.PeakSamples-1) + engine = promqltest.NewTestEngine(t, true, 0, stats.Samples.PeakSamples-1) runQuery(promql.ErrTooManySamples(env)) }) } @@ -1433,23 +1425,23 @@ load 10s }, { // The peak samples in memory is during the first evaluation: - // - Subquery takes 22 samples, 11 for each bigmetric, - // - Result is calculated per series where the series samples is buffered, hence 11 more here. + // - Subquery takes 22 samples, 11 for each bigmetric, but samples on the left bound won't be evaluated. + // - Result is calculated per series where the series samples is buffered, hence 10 more here. // - The result of two series is added before the last series buffer is discarded, so 2 more here. - // Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series). + // Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series). // The subquery samples and the buffer is discarded before duplicating. Query: `rate(bigmetric[10s:1s] @ 10)`, - MaxSamples: 35, + MaxSamples: 34, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, }, { // Here the reasoning is same as above. But LHS and RHS are done one after another. - // So while one of them takes 35 samples at peak, we need to hold the 2 sample + // So while one of them takes 34 samples at peak, we need to hold the 2 sample // result of the other till then. Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 37, + MaxSamples: 36, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, @@ -1458,25 +1450,25 @@ load 10s // promql.Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. // Hence at peak it is 2*21 (subquery from 0s to 20s) - // + 11 (buffer of a series per evaluation) + // + 10 (buffer of a series per evaluation) // + 6 (result from 2 series at 3 eval times). Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 59, + MaxSamples: 58, Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, }, { // Nested subquery. - // We saw that innermost rate takes 35 samples which is still the peak + // We saw that innermost rate takes 34 samples which is still the peak // since the other two subqueries just duplicate the result. 
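The peak-sample budgets in the comments above check out as plain sums. Here they are spelled out under the comments' own assumptions for `rate(bigmetric[10s:1s] @ 10)`: two series, a 10s:1s subquery loading points at 0s through 10s inclusive, and the left-open range dropping the 0s point from each per-series buffer.

```go
const (
	subqueryLoaded = 2 * 11 // both series, points at 0s..10s at 1s resolution
	seriesBuffer   = 10     // one series' buffer; the left-bound point is excluded
	heldResults    = 2      // results of two series kept while the last is reduced
	peak           = subqueryLoaded + seriesBuffer + heldResults // == 34

	// For the binary expression, one side peaks at 34 while the other
	// side's 2-sample result must still be held: 34 + 2 == 36.
	peakBinary = peak + 2
)
```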
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, - MaxSamples: 35, + MaxSamples: 34, Start: time.Unix(10, 0), }, { // Nested subquery. - // Now the outmost subquery produces more samples than inner most rate. + // Now the outermost subquery produces more samples than inner most rate. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`, MaxSamples: 36, Start: time.Unix(10, 0), @@ -1485,7 +1477,7 @@ load 10s for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) testFunc := func(expError error) { var err error var qry promql.Query @@ -1506,18 +1498,18 @@ load 10s } // Within limit. - engine = promqltest.NewTestEngine(false, 0, c.MaxSamples) + engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples) testFunc(nil) // Exceeding limit. - engine = promqltest.NewTestEngine(false, 0, c.MaxSamples-1) + engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples-1) testFunc(promql.ErrTooManySamples(env)) }) } } func TestAtModifier(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := promqltest.LoadedStorage(t, ` load 10s metric{job="1"} 0+1x1000 @@ -1585,11 +1577,11 @@ load 1ms start: 10, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, + Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, promql.Series{ - Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, + Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1598,7 +1590,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, + Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1607,7 +1599,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, + Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, @@ -1616,7 +1608,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, + Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, @@ -1713,7 +1705,8 @@ load 1ms {F: 3600, T: 6 * 60 * 1000}, {F: 3600, T: 7 * 60 * 1000}, }, - Metric: labels.EmptyLabels(), + Metric: labels.EmptyLabels(), + DropName: true, }, }, }, @@ -1929,20 +1922,24 @@ func TestSubquerySelector(t *testing.T) { nil, promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", 
"api-server", "instance", "1", "group", "canary"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, }, }, nil, @@ -1995,7 +1992,7 @@ func TestSubquerySelector(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := promqltest.LoadedStorage(t, tst.loadString) t.Cleanup(func() { storage.Close() }) @@ -2044,7 +2041,7 @@ func TestQueryLogger_basic(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) queryExec := func() { ctx, cancelCtx := context.WithCancel(context.Background()) @@ -2095,7 +2092,7 @@ func TestQueryLogger_fields(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) @@ -2124,7 +2121,7 @@ func TestQueryLogger_error(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) @@ -3007,7 +3004,7 @@ func TestEngineOptsValidation(t *testing.T) { } for _, c := range cases { - eng := promql.NewEngine(c.opts) + eng := promqltest.NewTestEngineWithOpts(t, c.opts) _, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0)) _, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { @@ -3020,8 +3017,31 @@ func TestEngineOptsValidation(t *testing.T) { } } +func TestEngine_Close(t *testing.T) { + t.Run("nil engine", func(t *testing.T) { + var ng *promql.Engine + require.NoError(t, ng.Close()) + }) + + t.Run("non-nil engine", func(t *testing.T) { + ng := promql.NewEngine(promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 0, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: nil, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: false, + LookbackDelta: 0, + EnableDelayedNameRemoval: true, + }) + require.NoError(t, ng.Close()) + }) +} + func TestInstantQueryWithRangeVectorSelector(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) baseT := timestamp.Time(0) storage := promqltest.LoadedStorage(t, ` @@ -3038,7 +3058,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { ts time.Time }{ "matches series with points in range": { - expr: "some_metric[1m]", + expr: "some_metric[2m]", ts: baseT.Add(2 * time.Minute), expected: promql.Matrix{ { @@ -3074,7 +3094,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { { Metric: labels.FromStrings("__name__", 
"some_metric_with_stale_marker"), Floats: []promql.FPoint{ - {T: timestamp.FromTime(baseT), F: 0}, {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, {T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3}, }, @@ -3216,7 +3235,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { seriesName := "sparse_histogram_series" seriesNameOverTime := "sparse_histogram_series_over_time" - engine := newTestEngine() + engine := newTestEngine(t) ts := idx0 * int64(10*time.Minute/time.Millisecond) app := storage.Appender(context.Background()) @@ -3295,12 +3314,12 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { newTs := ts + offset*int64(time.Minute/time.Millisecond) // sum_over_time(). - queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels(), DropName: true}}) // avg_over_time(). - queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels(), DropName: true}}) }) idx0++ } @@ -3486,7 +3505,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := teststorage.New(t) t.Cleanup(func() { storage.Close() }) @@ -3542,171 +3561,6 @@ func TestNativeHistogram_SubOperator(t *testing.T) { } } -func TestNativeHistogram_MulDivOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- originalHistogram := histogram.Histogram{ - Schema: 0, - Count: 21, - Sum: 33, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{3, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []int64{3, 0, 0}, - } - - cases := []struct { - scalar float64 - histogram histogram.Histogram - expectedMul histogram.FloatHistogram - expectedDiv histogram.FloatHistogram - }{ - { - scalar: 3, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 63, - Sum: 99, - ZeroThreshold: 0.001, - ZeroCount: 9, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{9, 9, 9}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{9, 9, 9}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: 7, - Sum: 11, - ZeroThreshold: 0.001, - ZeroCount: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{1, 1, 1}, - }, - }, - { - scalar: 0, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 0, - Sum: 0, - ZeroThreshold: 0.001, - ZeroCount: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{0, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{0, 0, 0}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: math.Inf(1), - Sum: math.Inf(1), - ZeroThreshold: 0.001, - ZeroCount: math.Inf(1), - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - floatSeriesName := "float_series" - - engine := newTestEngine() - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - h := c.histogram - lbls := labels.FromStrings("__name__", seriesName) - // Since we mutate h later, we need to create a copy here. - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - _, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar) - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - testutil.RequireEqual(t, exp, vector) - } - - // histogram * scalar. 
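For the record, the test being deleted here (per the TODO above, this coverage presumably moves into the promqltest script framework) encoded simple linear semantics in the queries that follow: multiplying a native histogram by a scalar scales the count, sum, zero count, and every bucket, and dividing by zero sends them all to +Inf. A sketch of that arithmetic with a toy struct, not the engine's FloatHistogram API:

```go
// floatHisto is a toy stand-in for the engine's float histogram.
type floatHisto struct {
	count, sum, zeroCount float64
	buckets               []float64
}

// scale captures what the deleted expectations encode: histogram * scalar
// (and histogram / scalar, via factor = 1/s) is applied componentwise.
func scale(h floatHisto, factor float64) floatHisto {
	out := floatHisto{
		count:     h.count * factor,
		sum:       h.sum * factor,
		zeroCount: h.zeroCount * factor,
	}
	for _, b := range h.buckets {
		out.buckets = append(out.buckets, b*factor)
	}
	return out
}

// With h = {count: 21, sum: 33, zeroCount: 3, buckets: [3 3 3]}:
//   scale(h, 3)              -> {63, 99, 9, [9 9 9]}  (the expectedMul case)
//   scale(h, 1/s) with s == 0 -> all +Inf             (the expectedDiv case for scalar 0)
```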
- queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar) - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // scalar * histogram. - queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName) - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // histogram * float. - queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName) - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // float * histogram. - queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName) - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) - - // histogram / scalar. - queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar) - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) - - // histogram / float. - queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName) - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) - }) - idx0++ - } - } -} - func TestQueryLookbackDelta(t *testing.T) { var ( load = `load 5m @@ -3724,43 +3578,43 @@ metric 0 1 2 }{ { name: "default lookback delta", - ts: lastDatapointTs.Add(defaultLookbackDelta), + ts: lastDatapointTs.Add(defaultLookbackDelta - time.Millisecond), expectSamples: true, }, { name: "outside default lookback delta", - ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond), + ts: lastDatapointTs.Add(defaultLookbackDelta), expectSamples: false, }, { name: "custom engine lookback delta", - ts: lastDatapointTs.Add(10 * time.Minute), + ts: lastDatapointTs.Add(10*time.Minute - time.Millisecond), engineLookback: 10 * time.Minute, expectSamples: true, }, { name: "outside custom engine lookback delta", - ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond), + ts: lastDatapointTs.Add(10 * time.Minute), engineLookback: 10 * time.Minute, expectSamples: false, }, { name: "custom query lookback delta", - ts: lastDatapointTs.Add(20 * time.Minute), + ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond), engineLookback: 10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: true, }, { name: "outside custom query lookback delta", - ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond), + ts: lastDatapointTs.Add(20 * time.Minute), engineLookback: 10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: false, }, { name: "negative custom query lookback delta", - ts: lastDatapointTs.Add(20 * time.Minute), + ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond), engineLookback: -10 * time.Minute, queryLookback: 20 * time.Minute, expectSamples: true, @@ -3770,7 +3624,7 @@ metric 0 1 2 for _, c := range cases { c := c t.Run(c.name, func(t *testing.T) { - engine := promqltest.NewTestEngine(false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery) + engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery) storage := promqltest.LoadedStorage(t, load) t.Cleanup(func() { storage.Close() }) @@ -3796,3 +3650,134 @@ func makeInt64Pointer(val int64) *int64 { *valp = val return valp } + +func TestHistogramCopyFromIteratorRegression(t *testing.T) { + // Loading the following histograms creates two chunks because there's a + // counter reset. Not only the counter is lower in the last histogram + // but also there's missing buckets. 
+ // This in turn means that chunk iterators will have different spans. + load := `load 1m +histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}} +` + storage := promqltest.LoadedStorage(t, load) + t.Cleanup(func() { storage.Close() }) + engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) + + verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) { + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + + m, ok := res.Value.(promql.Matrix) + require.True(t, ok) + + require.Len(t, m, 1) + series := m[0] + + require.Empty(t, series.Floats) + require.Len(t, series.Histograms, len(expected)) + for i, e := range expected { + series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care. + require.Equal(t, &e, series.Histograms[i].H) + } + } + + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[90s])", time.Unix(0, 0), time.Unix(0, 0).Add(60*time.Second), time.Minute) + require.NoError(t, err) + verify(t, qry, []histogram.FloatHistogram{ + { + Count: 3, + Sum: 3, // Increase from 4 to 6 is 2. Interpolation adds 1. + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram. + PositiveBuckets: []float64{1.5, 1.5}, // Increase from 2 to 3 is 1 in both buckets. Interpolation adds 0.5. + }, + }) + + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[61s]", time.Unix(0, 0).Add(2*time.Minute)) + require.NoError(t, err) + verify(t, qry, []histogram.FloatHistogram{ + { + Count: 6, + Sum: 6, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []float64{3, 3}, + }, + { + Count: 1, + Sum: 1, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []float64{1}, + }, + }) +} + +func TestRateAnnotations(t *testing.T) { + testCases := map[string]struct { + data string + expr string + expectedWarningAnnotations []string + expectedInfoAnnotations []string + }{ + "info annotation when two samples are selected": { + data: ` + series 1 2 + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`, + }, + }, + "no info annotations when no samples": { + data: ` + series + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting one sample": { + data: ` + series 1 2 + `, + expr: "rate(series[10s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when no samples due to mixed data types": { + data: ` + series{label="a"} 1 {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{ + `PromQL warning: encountered a mix of histograms and floats for metric name "series" (1:6)`, + }, + expectedInfoAnnotations: []string{}, + }, + "no info annotations when selecting two native histograms": { + data: ` + series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}} + `, + expr: "rate(series[1m1s])", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + store :=
promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data)) + t.Cleanup(func() { _ = store.Close() }) + + engine := newTestEngine(t) + query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute)) + require.NoError(t, err) + t.Cleanup(query.Close) + + res := query.Exec(context.Background()) + require.NoError(t, res.Err) + + warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0) + testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings) + testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos) + }) + } +} diff --git a/promql/functions.go b/promql/functions.go index dcc2cd7590..c4a7ee4a46 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -14,6 +14,7 @@ package promql import ( + "context" "errors" "fmt" "math" @@ -97,9 +98,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod lastT = samples.Histograms[numSamplesMinusOne].T var newAnnos annotations.Annotations resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) + annos.Merge(newAnnos) if resultHistogram == nil { // The histograms are not compatible with each other. - return enh.Out, annos.Merge(newAnnos) + return enh.Out, annos } case len(samples.Floats) > 1: numSamplesMinusOne = len(samples.Floats) - 1 @@ -129,10 +131,18 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod sampledInterval := float64(lastT-firstT) / 1000 averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) - // If the first/last samples are close to the boundaries of the range, - // extrapolate the result. This is as we expect that another sample - // will exist given the spacing between samples we've seen thus far, - // with an allowance for noise. + // If samples are close enough to the (lower or upper) boundary of the + // range, we extrapolate the rate all the way to the boundary in + // question. "Close enough" is defined as "up to 10% more than the + // average duration between samples within the range", see + // extrapolationThreshold below. Essentially, we are assuming a more or + // less regular spacing between samples, and if we don't see a sample + // where we would expect one, we assume the series does not cover the + // whole range, but starts and/or ends within the range. We still + // extrapolate the rate in this case, but not all the way to the + // boundary, but only by half of the average duration between samples + // (which is our guess for where the series actually starts or ends). + extrapolationThreshold := averageDurationBetweenSamples * 1.1 extrapolateToInterval := sampledInterval @@ -178,17 +188,29 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod // Otherwise, it returns the calculated histogram and an empty annotation. 
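The rewritten comment above pins the heuristic down: extrapolate all the way to a window boundary only when the nearest sample is within 1.1x the average inter-sample spacing of that boundary, otherwise extend by just half a spacing. The TestHistogramCopyFromIteratorRegression expectations added earlier exercise exactly this arithmetic: increase(histogram[90s]) at t=60s sees samples at 0s and 60s, a raw increase of 2 (sum 4 to 6), and scales by 90/60 to get 3. A simplified sketch of that calculation follows; it omits the counter-specific clamp near zero that the full implementation also applies.

```go
// extrapolatedIncrease replays the heuristic with the regression test's
// numbers: rawIncrease 2 between t=0s and t=60s, window (-30s, 60s].
func extrapolatedIncrease(rawIncrease, firstT, lastT, rangeStart, rangeEnd float64) float64 {
	sampledInterval := lastT - firstT // 60s covered by actual samples
	avgSpacing := sampledInterval / 1 // one gap between the two samples
	threshold := avgSpacing * 1.1     // "close enough" to a boundary

	extrapolateTo := sampledInterval
	if firstT-rangeStart < threshold {
		extrapolateTo += firstT - rangeStart // extend all the way to the start
	} else {
		extrapolateTo += avgSpacing / 2 // assume the series starts mid-range
	}
	if rangeEnd-lastT < threshold {
		extrapolateTo += rangeEnd - lastT
	} else {
		extrapolateTo += avgSpacing / 2
	}
	return rawIncrease * extrapolateTo / sampledInterval
}

// extrapolatedIncrease(2, 0, 60, -30, 60) == 3:
// 2 * (60+30+0)/60 == 3, the "Interpolation adds 1" in the test comment.
```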
func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { prev := points[0].H + usingCustomBuckets := prev.UsesCustomBuckets() last := points[len(points)-1].H if last == nil { return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } + minSchema := prev.Schema if last.Schema < minSchema { minSchema = last.Schema } + if last.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } + var annos annotations.Annotations + // We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point, + // so check the first and last point now. + if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + } + // First iteration to find out two things: // - What's the smallest relevant schema? // - Are all data points histograms? @@ -208,6 +230,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra if curr.Schema < minSchema { minSchema = curr.Schema } + if curr.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } } h := last.CopyToSchema(minSchema) @@ -241,7 +266,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } h.CounterResetHint = histogram.GaugeType - return h.Compact(0), nil + return h.Compact(0), annos } // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -325,7 +350,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. @@ -390,17 +415,22 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - // In case the labels are the same, NaN should sort to the bottom, so take - // ascending sort with NaN first and reverse it. - var anno annotations.Annotations - vals[0], anno = funcSort(vals, args, enh) - labels := stringSliceFromArgs(args[1:]) + // First, sort by the full label set. This ensures a consistent ordering in case sorting by the + // labels provided as arguments is not conclusive. slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label + return labels.Compare(a.Metric, b.Metric) + }) + + labels := stringSliceFromArgs(args[1:]) + // Next, sort by the labels provided as arguments. 
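The two sorting functions above share a two-pass scheme: pass one imposes a total order over the complete label set, and pass two orders by the requested labels only, returning 0 on ties so that the pass-one order decides them. The same idea in miniature, with plain structs instead of labels.Labels and an explicitly stable second pass standing in for the tie-breaking:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

type sample struct{ job, instance string }

// fullKey is the stand-in for comparing the complete label set.
func fullKey(s sample) string { return s.job + "\xff" + s.instance }

func main() {
	v := []sample{{"api", "2"}, {"api", "1"}, {"db", "1"}}
	// Pass 1: total order over all labels, the eventual tie breaker.
	slices.SortFunc(v, func(a, b sample) int {
		return strings.Compare(fullKey(a), fullKey(b))
	})
	// Pass 2: order by the requested label only; stability preserves the
	// pass-1 order for the two samples whose job values tie.
	slices.SortStableFunc(v, func(a, b sample) int {
		return strings.Compare(a.job, b.job)
	})
	fmt.Println(v) // [{api 1} {api 2} {db 1}]
}
```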
+ slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + // Iterate over each given label. for _, label := range labels { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) + // If we encounter multiple samples with the same label values, the sorting which was + // performed in the first step will act as a "tie breaker". if lv1 == lv2 { continue } @@ -415,22 +445,27 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode return 0 }) - return vals[0].(Vector), anno + return vals[0].(Vector), nil } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - // In case the labels are the same, NaN should sort to the bottom, so take - // ascending sort with NaN first and reverse it. - var anno annotations.Annotations - vals[0], anno = funcSortDesc(vals, args, enh) - labels := stringSliceFromArgs(args[1:]) + // First, sort by the full label set. This ensures a consistent ordering in case sorting by the + // labels provided as arguments is not conclusive. slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label + return labels.Compare(b.Metric, a.Metric) + }) + + labels := stringSliceFromArgs(args[1:]) + // Next, sort by the labels provided as arguments. + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + // Iterate over each given label. for _, label := range labels { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) + // If we encounter multiple samples with the same label values, the sorting which was + // performed in the first step will act as a "tie breaker". if lv1 == lv2 { continue } @@ -445,21 +480,25 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval return 0 }) - return vals[0].(Vector), anno + return vals[0].(Vector), nil } // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F - max := vals[2].(Vector)[0].F - if max < min { + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F + if maxVal < minVal { return enh.Out, nil } for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Max(min, math.Min(max, el.F)), + Metric: el.Metric, + F: math.Max(minVal, math.Min(maxVal, el.F)), + DropName: true, }) } return enh.Out, nil @@ -468,11 +507,15 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].F + maxVal := vals[1].(Vector)[0].F for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Min(max, el.F), + Metric: el.Metric, + F: math.Min(maxVal, el.F), + DropName: true, }) } return enh.Out, nil @@ -481,11 +524,15 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp_min(Vector 
parser.ValueTypeVector, min Scalar) (Vector, Annotations) === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F + minVal := vals[1].(Vector)[0].F for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Max(min, el.F), + Metric: el.Metric, + F: math.Max(minVal, el.F), + DropName: true, }) } return enh.Out, nil @@ -506,8 +553,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper for _, el := range vec { f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: f, + Metric: el.Metric, + F: f, + DropName: true, }) } return enh.Out, nil @@ -573,9 +621,28 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return vec, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - var mean, count, c float64 + var ( + sum, mean, count, kahanC float64 + incrementalMean bool + ) for _, f := range s.Floats { count++ + if !incrementalMean { + newSum, newC := kahanSumInc(f.F, sum, kahanC) + // Perform regular mean calculation as long as + // the sum doesn't overflow and (in any case) + // for the first iteration (even if we start + // with ±Inf) to not run into division-by-zero + // problems below. + if count == 1 || !math.IsInf(newSum, 0) { + sum, kahanC = newSum, newC + continue + } + // Handle overflow by reverting to incremental calculation of the mean value. + incrementalMean = true + mean = sum / (count - 1) + kahanC /= count - 1 + } if math.IsInf(mean, 0) { if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { // The `mean` and `f.F` values are `Inf` of the same sign. They @@ -593,13 +660,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean, c = kahanSumInc(f.F/count-mean/count, mean, c) + correctedMean := mean + kahanC + mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC) } - - if math.IsInf(mean, 0) { - return mean + if incrementalMean { + return mean + kahanC } - return mean + c + return (sum + kahanC) / count }), nil } @@ -665,13 +732,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - max := s.Floats[0].F + maxVal := s.Floats[0].F for _, f := range s.Floats { - if f.F > max || math.IsNaN(max) { - max = f.F + if f.F > maxVal || math.IsNaN(maxVal) { + maxVal = f.F } } - return max + return maxVal }), nil } @@ -685,13 +752,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - min := s.Floats[0].F + minVal := s.Floats[0].F for _, f := range s.Floats { - if f.F < min || math.IsNaN(min) { - min = f.F + if f.F < minVal || math.IsNaN(minVal) { + minVal = f.F } } - return min + return minVal }), nil } @@ -837,9 +904,13 @@ func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *Eval func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { if el.H == nil { // Process only float samples. 
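The reworked avg_over_time above deserves a restatement: it keeps a Kahan-compensated plain sum for as long as that sum stays finite, and only on overflow switches to the incremental running-mean form, seeding it by dividing the sum and compensation accumulated so far by the count. A distilled, self-contained version of that strategy, assuming ordinary float inputs and omitting the same-signed-Inf special cases the full code also handles:

```go
package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to a compensated running sum (sum, c).
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0 // compensation is meaningless once the sum overflows
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

func average(vals []float64) float64 {
	var sum, c, mean float64
	incremental := false
	for i, v := range vals {
		count := float64(i + 1)
		if !incremental {
			newSum, newC := kahanSumInc(v, sum, c)
			if count == 1 || !math.IsInf(newSum, 0) {
				sum, c = newSum, newC
				continue
			}
			// The plain sum overflowed: switch to a running mean seeded
			// from everything accumulated so far.
			incremental = true
			mean = sum / (count - 1)
			c /= count - 1
		}
		corrected := mean + c
		mean, c = kahanSumInc(v/count-corrected/count, mean, c)
	}
	if incremental {
		return mean + c
	}
	return (sum + c) / float64(len(vals))
}

func main() {
	// The plain sum of these would overflow to +Inf; the fallback keeps
	// the finite average of roughly 6.67e307.
	fmt.Println(average([]float64{1e308, 1e308, 0}))
}
```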
+ if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: f(el.F), + Metric: el.Metric, + F: f(el.F), + DropName: true, }) } } @@ -983,9 +1054,13 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: float64(el.T) / 1000, + Metric: el.Metric, + F: float64(el.T) / 1000, + DropName: true, }) } return enh.Out, nil @@ -1092,9 +1167,13 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: sample.H.Count, + Metric: sample.Metric, + F: sample.H.Count, + DropName: true, }) } return enh.Out, nil @@ -1109,9 +1188,13 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: sample.H.Sum, + Metric: sample.Metric, + F: sample.H.Sum, + DropName: true, }) } return enh.Out, nil @@ -1126,9 +1209,13 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: sample.H.Sum / sample.H.Count, + Metric: sample.Metric, + F: sample.H.Sum / sample.H.Count, + DropName: true, }) } return enh.Out, nil @@ -1165,9 +1252,13 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval } variance += cVariance variance /= sample.H.Count + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: math.Sqrt(variance), + Metric: sample.Metric, + F: math.Sqrt(variance), + DropName: true, }) } return enh.Out, nil @@ -1204,9 +1295,13 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval } variance += cVariance variance /= sample.H.Count + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: variance, + Metric: sample.Metric, + F: variance, + DropName: true, }) } return enh.Out, nil @@ -1223,9 +1318,13 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: histogramFraction(lower, upper, sample.H), + Metric: sample.Metric, + F: histogramFraction(lower, upper, sample.H), + DropName: true, }) } return enh.Out, nil @@ -1293,9 +1392,13 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = 
sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: histogramQuantile(q, sample.H), + Metric: sample.Metric, + F: histogramQuantile(q, sample.H), + DropName: true, }) } @@ -1369,7 +1472,7 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp } // label_replace function operates only on series; does not look at timestamps or values. -func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) { +func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( dst = stringFromArg(args[1]) repl = stringFromArg(args[2]) @@ -1377,7 +1480,7 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an regexStr = stringFromArg(args[4]) ) - regex, err := regexp.Compile("^(?:" + regexStr + ")$") + regex, err := regexp.Compile("^(?s:" + regexStr + ")$") if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } @@ -1385,7 +1488,7 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } - val, ws := ev.eval(args[0]) + val, ws := ev.eval(ctx, args[0]) matrix := val.(Matrix) lb := labels.NewBuilder(labels.EmptyLabels()) @@ -1397,6 +1500,11 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an lb.Reset(el.Metric) lb.Set(dst, string(res)) matrix[i].Metric = lb.Labels() + if dst == model.MetricNameLabel { + matrix[i].DropName = false + } else { + matrix[i].DropName = el.DropName + } } } if matrix.ContainsSameLabelset() { @@ -1421,7 +1529,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } // label_join function operates only on series; does not look at timestamps or values. -func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) { +func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( dst = stringFromArg(args[1]) sep = stringFromArg(args[2]) @@ -1438,7 +1546,7 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) } - val, ws := ev.eval(args[0]) + val, ws := ev.eval(ctx, args[0]) matrix := val.(Matrix) srcVals := make([]string, len(srcLabels)) lb := labels.NewBuilder(labels.EmptyLabels()) @@ -1451,6 +1559,12 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot lb.Reset(el.Metric) lb.Set(dst, strval) matrix[i].Metric = lb.Labels() + + if dst == model.MetricNameLabel { + matrix[i].DropName = false + } else { + matrix[i].DropName = el.DropName + } } return matrix, ws @@ -1473,9 +1587,13 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo for _, el := range vals[0].(Vector) { t := time.Unix(int64(el.F), 0).UTC() + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: f(t), + Metric: el.Metric, + F: f(t), + DropName: true, }) } return enh.Out @@ -1539,82 +1657,82 @@ func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // FunctionCalls is a list of all functions supported by PromQL, including their types. 
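Two changes in the hunks above ripple through the whole file and are worth summarizing before the function table below. First, value-transforming functions now tag their outputs with DropName: true instead of calling DropMetricName() eagerly, falling back to the eager call only when enh.enableDelayedNameRemoval is off; stripping __name__ thus becomes a single deferred pass, which is also why label_replace and label_join above clear the flag when they write __name__ themselves. A toy sketch of the shape of that final pass (the names are illustrative, not the engine's):

```go
// sample is a toy stand-in for promql.Sample: a label map plus the
// DropName flag the functions above now set.
type sample struct {
	metric   map[string]string
	dropName bool
}

// finalize performs the single deferred pass: with delayed removal on,
// __name__ is stripped from every sample that was flagged for it.
func finalize(out []sample, delayedNameRemoval bool) {
	if !delayedNameRemoval {
		return // names were already dropped eagerly inside each function
	}
	for i := range out {
		if out[i].dropName {
			delete(out[i].metric, "__name__")
		}
	}
}
```

Second, label_replace's anchoring regex gained the s flag, so a dot in the user's pattern now also matches newlines inside label values:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	val := "foo\nbar"
	oldRe := regexp.MustCompile("^(?:.*)$")  // "." stops at a newline
	newRe := regexp.MustCompile("^(?s:.*)$") // (?s): "." matches "\n" too
	fmt.Println(oldRe.MatchString(val), newRe.MatchString(val)) // false true
}
```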
var FunctionCalls = map[string]FunctionCall{ - "abs": funcAbs, - "absent": funcAbsent, - "absent_over_time": funcAbsentOverTime, - "acos": funcAcos, - "acosh": funcAcosh, - "asin": funcAsin, - "asinh": funcAsinh, - "atan": funcAtan, - "atanh": funcAtanh, - "avg_over_time": funcAvgOverTime, - "ceil": funcCeil, - "changes": funcChanges, - "clamp": funcClamp, - "clamp_max": funcClampMax, - "clamp_min": funcClampMin, - "cos": funcCos, - "cosh": funcCosh, - "count_over_time": funcCountOverTime, - "days_in_month": funcDaysInMonth, - "day_of_month": funcDayOfMonth, - "day_of_week": funcDayOfWeek, - "day_of_year": funcDayOfYear, - "deg": funcDeg, - "delta": funcDelta, - "deriv": funcDeriv, - "exp": funcExp, - "floor": funcFloor, - "histogram_avg": funcHistogramAvg, - "histogram_count": funcHistogramCount, - "histogram_fraction": funcHistogramFraction, - "histogram_quantile": funcHistogramQuantile, - "histogram_sum": funcHistogramSum, - "histogram_stddev": funcHistogramStdDev, - "histogram_stdvar": funcHistogramStdVar, - "holt_winters": funcHoltWinters, - "hour": funcHour, - "idelta": funcIdelta, - "increase": funcIncrease, - "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, - "ln": funcLn, - "log10": funcLog10, - "log2": funcLog2, - "last_over_time": funcLastOverTime, - "mad_over_time": funcMadOverTime, - "max_over_time": funcMaxOverTime, - "min_over_time": funcMinOverTime, - "minute": funcMinute, - "month": funcMonth, - "pi": funcPi, - "predict_linear": funcPredictLinear, - "present_over_time": funcPresentOverTime, - "quantile_over_time": funcQuantileOverTime, - "rad": funcRad, - "rate": funcRate, - "resets": funcResets, - "round": funcRound, - "scalar": funcScalar, - "sgn": funcSgn, - "sin": funcSin, - "sinh": funcSinh, - "sort": funcSort, - "sort_desc": funcSortDesc, - "sort_by_label": funcSortByLabel, - "sort_by_label_desc": funcSortByLabelDesc, - "sqrt": funcSqrt, - "stddev_over_time": funcStddevOverTime, - "stdvar_over_time": funcStdvarOverTime, - "sum_over_time": funcSumOverTime, - "tan": funcTan, - "tanh": funcTanh, - "time": funcTime, - "timestamp": funcTimestamp, - "vector": funcVector, - "year": funcYear, + "abs": funcAbs, + "absent": funcAbsent, + "absent_over_time": funcAbsentOverTime, + "acos": funcAcos, + "acosh": funcAcosh, + "asin": funcAsin, + "asinh": funcAsinh, + "atan": funcAtan, + "atanh": funcAtanh, + "avg_over_time": funcAvgOverTime, + "ceil": funcCeil, + "changes": funcChanges, + "clamp": funcClamp, + "clamp_max": funcClampMax, + "clamp_min": funcClampMin, + "cos": funcCos, + "cosh": funcCosh, + "count_over_time": funcCountOverTime, + "days_in_month": funcDaysInMonth, + "day_of_month": funcDayOfMonth, + "day_of_week": funcDayOfWeek, + "day_of_year": funcDayOfYear, + "deg": funcDeg, + "delta": funcDelta, + "deriv": funcDeriv, + "exp": funcExp, + "floor": funcFloor, + "histogram_avg": funcHistogramAvg, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, + "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, + "double_exponential_smoothing": funcDoubleExponentialSmoothing, + "hour": funcHour, + "idelta": funcIdelta, + "increase": funcIncrease, + "irate": funcIrate, + "label_replace": funcLabelReplace, + "label_join": funcLabelJoin, + "ln": funcLn, + "log10": funcLog10, + "log2": funcLog2, + "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, + "max_over_time": 
+	"min_over_time":                funcMinOverTime,
+	"minute":                       funcMinute,
+	"month":                        funcMonth,
+	"pi":                           funcPi,
+	"predict_linear":               funcPredictLinear,
+	"present_over_time":            funcPresentOverTime,
+	"quantile_over_time":           funcQuantileOverTime,
+	"rad":                          funcRad,
+	"rate":                         funcRate,
+	"resets":                       funcResets,
+	"round":                        funcRound,
+	"scalar":                       funcScalar,
+	"sgn":                          funcSgn,
+	"sin":                          funcSin,
+	"sinh":                         funcSinh,
+	"sort":                         funcSort,
+	"sort_desc":                    funcSortDesc,
+	"sort_by_label":                funcSortByLabel,
+	"sort_by_label_desc":           funcSortByLabelDesc,
+	"sqrt":                         funcSqrt,
+	"stddev_over_time":             funcStddevOverTime,
+	"stdvar_over_time":             funcStdvarOverTime,
+	"sum_over_time":                funcSumOverTime,
+	"tan":                          funcTan,
+	"tanh":                         funcTanh,
+	"time":                         funcTime,
+	"timestamp":                    funcTimestamp,
+	"vector":                       funcVector,
+	"year":                         funcYear,
 }

 // AtModifierUnsafeFunctions are the functions whose result
diff --git a/promql/functions_test.go b/promql/functions_test.go
index aef59c8379..9ee0ba51dc 100644
--- a/promql/functions_test.go
+++ b/promql/functions_test.go
@@ -24,6 +24,7 @@ import (
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/promql/promqltest"
 	"github.com/prometheus/prometheus/util/teststorage"
 )

@@ -39,7 +40,7 @@ func TestDeriv(t *testing.T) {
 		MaxSamples: 10000,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)

 	a := storage.Appender(context.Background())
diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go
index dfafea5f8c..459d5924ae 100644
--- a/promql/histogram_stats_iterator.go
+++ b/promql/histogram_stats_iterator.go
@@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
 	var t int64
 	t, f.currentH = f.Iterator.AtHistogram(f.currentH)
 	if value.IsStaleNaN(f.currentH.Sum) {
-		f.setLastH(f.currentH)
 		h = &histogram.Histogram{Sum: f.currentH.Sum}
 		return t, h
 	}
@@ -63,9 +62,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
 		return t, h
 	}

-	h.CounterResetHint = f.getResetHint(f.currentH)
-	h.Count = f.currentH.Count
-	h.Sum = f.currentH.Sum
+	returnValue := histogram.Histogram{
+		CounterResetHint: f.getResetHint(f.currentH),
+		Count:            f.currentH.Count,
+		Sum:              f.currentH.Sum,
+	}
+	returnValue.CopyTo(h)
+	f.setLastH(f.currentH)
 	return t, h
 }

@@ -77,7 +80,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
 	var t int64
 	t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
 	if value.IsStaleNaN(f.currentFH.Sum) {
-		f.setLastFH(f.currentFH)
 		return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
 	}

@@ -91,9 +93,13 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
 		return t, fh
 	}

-	fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint)
-	fh.Count = f.currentFH.Count
-	fh.Sum = f.currentFH.Sum
+	returnValue := histogram.FloatHistogram{
+		CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
+		Count:            f.currentFH.Count,
+		Sum:              f.currentFH.Sum,
+	}
+	returnValue.CopyTo(fh)
+	f.setLastFH(f.currentFH)
 	return t, fh
 }
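Editor's note: the histogram_stats_iterator.go change above builds the stats in a fresh returnValue, CopyTo-s it into the caller's buffer, and only then records setLastH/setLastFH. The point is to keep counter-reset detection from depending on memory that aliases a buffer the caller may recycle between calls. A self-contained illustration of that hazard, with hypothetical stand-in types (not the real iterator):

```go
package main

import "fmt"

// hist and statsIter are hypothetical stand-ins for the real histogram
// and iterator types, stripped down to show the aliasing problem.
type hist struct{ count uint64 }

type statsIter struct{ last *hist }

// Buggy shape: the iterator keeps a pointer into the caller's reusable
// buffer, so the caller's next call mutates the saved state that
// counter-reset detection depends on.
func (it *statsIter) atAliased(buf *hist, cur hist) *hist {
	*buf = cur
	it.last = buf
	return buf
}

// Fixed shape: copy into the caller's buffer, but remember an
// independent value (mirroring returnValue.CopyTo(h) then setLastH).
func (it *statsIter) atCopied(buf *hist, cur hist) *hist {
	*buf = cur
	saved := cur
	it.last = &saved
	return buf
}

func main() {
	it := &statsIter{}
	buf := &hist{}
	it.atAliased(buf, hist{count: 5})
	*buf = hist{count: 9}      // caller recycles the buffer
	fmt.Println(it.last.count) // 9: saved state was clobbered

	it2 := &statsIter{}
	it2.atCopied(buf, hist{count: 5})
	*buf = hist{count: 9}
	fmt.Println(it2.last.count) // 5: state survives buffer reuse
}
```

Dropping the setLastH/setLastFH calls in the stale-NaN branches follows the same logic: a stale marker carries no usable counter state, so it should not overwrite the last-seen histogram used for reset detection.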
"github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) func TestHistogramStatsDecoding(t *testing.T) { - histograms := []*histogram.Histogram{ - tsdbutil.GenerateTestHistogram(0), - tsdbutil.GenerateTestHistogram(1), - tsdbutil.GenerateTestHistogram(2), - tsdbutil.GenerateTestHistogram(2), - } - histograms[0].CounterResetHint = histogram.NotCounterReset - histograms[1].CounterResetHint = histogram.UnknownCounterReset - histograms[2].CounterResetHint = histogram.CounterReset - histograms[3].CounterResetHint = histogram.UnknownCounterReset - - expectedHints := []histogram.CounterResetHint{ - histogram.NotCounterReset, - histogram.NotCounterReset, - histogram.CounterReset, - histogram.NotCounterReset, + cases := []struct { + name string + histograms []*histogram.Histogram + expectedHints []histogram.CounterResetHint + }{ + { + name: "unknown counter reset triggers detection", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + tsdbutil.GenerateTestHistogramWithHint(2, histogram.CounterReset), + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.NotCounterReset, + histogram.CounterReset, + histogram.NotCounterReset, + }, + }, + { + name: "stale sample before unknown reset hint", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + {Sum: math.Float64frombits(value.StaleNaN)}, + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.NotCounterReset, + histogram.UnknownCounterReset, + histogram.NotCounterReset, + }, + }, + { + name: "unknown counter reset at the beginning", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + }, + }, + { + name: "detect real counter reset", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.CounterReset, + }, + }, + { + name: "detect real counter reset after stale NaN", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + {Sum: math.Float64frombits(value.StaleNaN)}, + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.UnknownCounterReset, + histogram.CounterReset, + }, + }, } - t.Run("histogram_stats", func(t *testing.T) { - decodedStats := make([]*histogram.Histogram, 0) - statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtHistogram(nil) - decodedStats = append(decodedStats, h) - } - for i 
:= 0; i < len(histograms); i++ { - require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) - require.Equal(t, histograms[i].Count, decodedStats[i].Count) - require.Equal(t, histograms[i].Sum, decodedStats[i].Sum) - } - }) - t.Run("float_histogram_stats", func(t *testing.T) { - decodedStats := make([]*histogram.FloatHistogram, 0) - statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtFloatHistogram(nil) - decodedStats = append(decodedStats, h) - } - for i := 0; i < len(histograms); i++ { - fh := histograms[i].ToFloat(nil) - require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) - require.Equal(t, fh.Count, decodedStats[i].Count) - require.Equal(t, fh.Sum, decodedStats[i].Sum) - } - }) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Run("histogram_stats", func(t *testing.T) { + decodedStats := make([]*histogram.Histogram, 0) + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i)) + h := tc.histograms[i] + if value.IsStaleNaN(h.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, uint64(0), decodedStats[i].Count) + } else { + require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count) + require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum) + } + } + }) + t.Run("float_histogram_stats", func(t *testing.T) { + decodedStats := make([]*histogram.FloatHistogram, 0) + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtFloatHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint) + fh := tc.histograms[i].ToFloat(nil) + if value.IsStaleNaN(fh.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, float64(0), decodedStats[i].Count) + } else { + require.Equal(t, fh.Count, decodedStats[i].Count) + require.Equal(t, fh.Sum, decodedStats[i].Sum) + } + } + }) + }) + } } type histogramSeries struct { diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 830e8a2c5e..162d7817ab 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -352,8 +352,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) { // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f // for all the non-nil children of node, recursively. func Inspect(node Node, f inspector) { - //nolint: errcheck - Walk(f, node, nil) + Walk(f, node, nil) //nolint:errcheck } // Children returns a list of all child nodes of a syntax tree node. @@ -419,7 +418,7 @@ func mergeRanges(first, last Node) posrange.PositionRange { } } -// Item implements the Node interface. +// PositionRange implements the Node interface. // This makes it possible to call mergeRanges on them. 
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index b39c1150a5..befb9bdf3e 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/promql/parser/posrange"
+
+	"github.com/prometheus/common/model"
 )
 %}

@@ -43,7 +45,6 @@
 	int      int64
 	uint     uint64
 	float    float64
-	duration time.Duration
 }

@@ -85,6 +86,7 @@
 NEGATIVE_BUCKETS_DESC
 ZERO_BUCKET_DESC
 ZERO_BUCKET_WIDTH_DESC
 CUSTOM_VALUES_DESC
+COUNTER_RESET_HINT_DESC
 %token histogramDescEnd

 // Operators.
@@ -126,6 +128,8 @@
 STDDEV
 STDVAR
 SUM
 TOPK
+LIMITK
+LIMIT_RATIO
 %token aggregatorsEnd

 // Keywords.
@@ -148,6 +152,14 @@
 START
 END
 %token preprocessorEnd

+// Counter reset hints.
+%token counterResetHintsStart
+%token <item>
+UNKNOWN_COUNTER_RESET
+COUNTER_RESET
+NOT_COUNTER_RESET
+GAUGE_TYPE
+%token counterResetHintsEnd

 // Start symbols for the generated parser.
 %token startSymbolsStart
@@ -162,7 +174,7 @@ START_METRIC_SELECTOR

 // Type definitions for grammar rules.
 %type <matchers> label_match_list
 %type <matcher> label_matcher
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
 %type <labels> label_set metric
 %type <lblList> label_set_list
 %type