diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index f4d17b3596..bb4e2d24c9 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,4 +1,4 @@
-blank_issues_enabled: false
+blank_issues_enabled: true
 contact_links:
   - name: Prometheus Community Support
     url: https://prometheus.io/community/
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 89b2f4d0b6..191e07ffac 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,9 +1,22 @@
 version: 2
 updates:
-  - package-ecosystem: "gomod"
+  - package-ecosystem: "docker"
     directory: "/"
     schedule:
       interval: "monthly"
+  - package-ecosystem: "github-actions"
+    directories:
+      - "/"
+      - "/scripts"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "gomod"
+    directories:
+      - "/"
+      - "/documentation/examples/remote_storage"
+      - "/internal/tools"
+    schedule:
+      interval: "monthly"
     groups:
       k8s.io:
         patterns:
@@ -12,24 +25,3 @@ updates:
         patterns:
           - "go.opentelemetry.io/*"
     open-pull-requests-limit: 20
-  - package-ecosystem: "gomod"
-    directory: "/documentation/examples/remote_storage"
-    schedule:
-      interval: "monthly"
-  - package-ecosystem: "npm"
-    directory: "/web/ui"
-    schedule:
-      interval: "monthly"
-    open-pull-requests-limit: 20
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "monthly"
-  - package-ecosystem: "github-actions"
-    directory: "/scripts"
-    schedule:
-      interval: "monthly"
-  - package-ecosystem: "docker"
-    directory: "/"
-    schedule:
-      interval: "monthly"
diff --git a/.github/stale.yml b/.github/stale.yml
deleted file mode 100644
index 66a72af533..0000000000
--- a/.github/stale.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Configuration for probot-stale - https://github.com/probot/stale
-
-# Number of days of inactivity before an Issue or Pull Request becomes stale
-daysUntilStale: 60
-
-# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
-# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
-daysUntilClose: false
-
-# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
-onlyLabels: []
-
-# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
-exemptLabels:
-  - keepalive
-
-# Set to true to ignore issues in a project (defaults to false)
-exemptProjects: false
-
-# Set to true to ignore issues in a milestone (defaults to false)
-exemptMilestones: false
-
-# Set to true to ignore issues with an assignee (defaults to false)
-exemptAssignees: false
-
-# Label to use when marking as stale
-staleLabel: stale
-
-# Comment to post when marking as stale. Set to `false` to disable
-markComment: false
-
-# Comment to post when removing the stale label.
-# unmarkComment: >
-#   Your comment here.
-
-# Comment to post when closing a stale Issue or Pull Request.
-# closeComment: >
-#   Your comment here.
-
-# Limit the number of actions per hour, from 1-30. Default is 30
-limitPerRun: 30
-
-# Limit to only `issues` or `pulls`
-only: pulls
-
-# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
-# pulls:
-#   daysUntilStale: 30
-#   markComment: >
-#     This pull request has been automatically marked as stale because it has not had
-#     recent activity. It will be closed if no further activity occurs. Thank you
-#     for your contributions.
-
-# issues:
-#   exemptLabels:
-#     - confirmed
diff --git a/.github/workflows/automerge-dependabot.yml b/.github/workflows/automerge-dependabot.yml
new file mode 100644
index 0000000000..3909f57329
--- /dev/null
+++ b/.github/workflows/automerge-dependabot.yml
@@ -0,0 +1,30 @@
+---
+name: Dependabot auto-merge
+on: pull_request
+
+concurrency:
+  group: ${{ github.workflow }}-${{ (github.event.pull_request && github.event.pull_request.number) || github.ref || github.run_id }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  dependabot:
+    permissions:
+      contents: write
+      pull-requests: write
+    runs-on: ubuntu-latest
+    if: ${{ github.event.pull_request.user.login == 'dependabot[bot]' && github.repository_owner == 'prometheus' }}
+    steps:
+      - name: Dependabot metadata
+        id: metadata
+        uses: dependabot/fetch-metadata@d7267f607e9d3fb96fc2fbe83e0af444713e90b7 # v2.3.0
+        with:
+          github-token: "${{ secrets.GITHUB_TOKEN }}"
+      - name: Enable auto-merge for Dependabot PRs
+        if: ${{ steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch' }}
+        run: gh pr merge --auto --merge "$PR_URL"
+        env:
+          PR_URL: ${{ github.event.pull_request.html_url }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index cbfeb2ba5b..4e942f1f3b 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -12,8 +12,10 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index 8b964ef24c..add72cc89c 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -12,8 +12,10 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8b3624383c..ea10fd0091 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,6 +4,9 @@ on:
   pull_request:
   push:
 
+permissions:
+  contents: read
+
 jobs:
   test_go:
     name: Go tests
@@ -11,13 +14,17 @@ jobs:
     container:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - uses: ./.github/promci/actions/setup_environment
-      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
-      - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
+        with:
+          enable_npm: true
+      - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
+      - run: go test ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples
 
@@ -25,13 +32,17 @@ jobs:
     name: More Go tests
     runs-on: ubuntu-latest
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
-      - run: GOARCH=386 go test ./cmd/prometheus
+      - run: go test --tags=slicelabels -race ./cmd/prometheus
+      - run: go test --tags=forcedirectio -race ./tsdb/
+      - run: GOARCH=386 go test ./...
       - uses: ./.github/promci/actions/check_proto
         with:
          version: "3.15.8"
@@ -39,11 +50,16 @@ jobs:
   test_go_oldest:
     name: Go tests with previous Go version
     runs-on: ubuntu-latest
+    env:
+      # Enforce the Go version.
+      GOTOOLCHAIN: local
     container:
       # The go version in this image should be N-1 wrt test_go.
-      image: quay.io/prometheus/golang-builder:1.21-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""
@@ -54,11 +70,13 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false
@@ -74,12 +92,14 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
-          go-version: 1.22.x
+          persist-credentials: false
+      - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+        with:
+          go-version: 1.24.x
       - run: |
-          $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
+          $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
          go test $TestTargets -vet=off -v
         shell: powershell
 
@@ -89,9 +109,11 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.24-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -107,6 +129,8 @@ jobs:
     if: |
       !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       &&
+      !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      &&
       !(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       &&
       !(github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -114,8 +138,10 @@ jobs:
     strategy:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -127,6 +153,8 @@ jobs:
     if: |
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      ||
       (github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       ||
       (github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -137,23 +165,50 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
           thread: ${{ matrix.thread }}
+  build_all_status:
+    # This status check aggregates the individual matrix jobs of the "Build
+    # Prometheus for all architectures" step into a final status. Fails if a
+    # single matrix job fails, succeeds if all matrix jobs succeed.
+    # See https://github.com/orgs/community/discussions/4324 for why this is
+    # needed
+    name: Report status of build Prometheus for all architectures
+    runs-on: ubuntu-latest
+    needs: [build_all]
+    # The run condition needs to include always(). Otherwise actions behave
+    # unexpectedly:
+    # with only "needs", the status report is skipped if one of the builds fails https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/using-jobs-in-a-workflow#defining-prerequisite-jobs
+    # and a skipped check is treated as success https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborat[…]n-repositories-with-code-quality-features/about-status-checks
+    # Adding always() ensures that the status check runs independently of the
+    # results of Build All.
+    if: always() && github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')
+    steps:
+      - name: Successful build
+        if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
+        run: exit 0
+      - name: Failing or cancelled build
+        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+        run: exit 1
   check_generated_parser:
     name: Check generated parser
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
       - name: Install Go
-        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+        uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
         with:
           cache: false
-          go-version: 1.22.x
+          go-version: 1.24.x
       - name: Run goyacc and check for diff
         run: make install-goyacc check-generated-parser
   golangci:
@@ -161,25 +216,30 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - name: Install Go
-        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
-          go-version: 1.22.x
+          persist-credentials: false
+      - name: Install Go
+        uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+        with:
+          go-version: 1.24.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
+        uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.59.1
+          version: v2.1.5
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
   codeql:
     uses: ./.github/workflows/codeql-analysis.yml
+    permissions:
+      contents: read
+      security-events: write
 
   publish_main:
     name: Publish main branch artifacts
@@ -187,8 +247,10 @@ jobs:
     runs-on: ubuntu-latest
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -199,10 +261,15 @@ jobs:
     name: Publish release artefacts
     runs-on: ubuntu-latest
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
-    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+    if: |
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
    steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -216,31 +283,44 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
       - name: Install nodejs
-        uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
+        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
+      - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
           restore-keys: |
             ${{ runner.os }}-node-
       - name: Check libraries version
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
-        run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)"
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+        run: ./scripts/ui_release.sh --check-package "$(./scripts/get_module_version.sh ${GH_REF_NAME})"
+        env:
+          GH_REF_NAME: ${{ github.ref_name }}
       - name: build
         run: make assets
       - name: Copy files before publishing libs
         run: ./scripts/ui_release.sh --copy
       - name: Publish dry-run libraries
-        if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))"
+        if: |
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          &&
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish dry-run
       - name: Publish libraries
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish
         env:
           # The setup-node action writes an .npmrc file with this env variable
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 12ffc659c2..b444815d3c 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -24,15 +24,17 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
 
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+        uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
         with:
           languages: ${{ matrix.language }}
 
       - name: Autobuild
-        uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+        uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
 
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+        uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml
index 8ddbc34aeb..7de8bb8da7 100644
--- a/.github/workflows/container_description.yml
+++ b/.github/workflows/container_description.yml
@@ -18,7 +18,9 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub
@@ -40,7 +42,9 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name
diff --git a/.github/workflows/funcbench.yml b/.github/workflows/funcbench.yml
deleted file mode 100644
index 8959f82142..0000000000
--- a/.github/workflows/funcbench.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-on:
-  repository_dispatch:
-    types: [funcbench_start]
-name: Funcbench Workflow
-permissions:
-  contents: read
-
-jobs:
-  run_funcbench:
-    name: Running funcbench
-    if: github.event.action == 'funcbench_start'
-    runs-on: ubuntu-latest
-    env:
-      AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
-      BRANCH: ${{ github.event.client_payload.BRANCH }}
-      BENCH_FUNC_REGEX: ${{ github.event.client_payload.BENCH_FUNC_REGEX }}
-      PACKAGE_PATH: ${{ github.event.client_payload.PACKAGE_PATH }}
-      GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
-      GITHUB_ORG: prometheus
-      GITHUB_REPO: prometheus
-      GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
-      LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
-      GKE_PROJECT_ID: macro-mile-203600
-      PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
-      PROVIDER: gke
-      ZONE: europe-west3-a
-    steps:
-      - name: Update status to pending
-        run: >-
-          curl -i -X POST
-          -H "Authorization: Bearer $GITHUB_TOKEN"
-          -H "Content-Type: application/json"
-          --data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
-          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
-      - name: Prepare nodepool
-        uses: docker://prominfra/funcbench:master
-        with:
-          entrypoint: "docker_entrypoint"
-          args: make deploy
-      - name: Delete all resources
-        if: always()
-        uses: docker://prominfra/funcbench:master
-        with:
-          entrypoint: "docker_entrypoint"
-          args: make clean
-      - name: Update status to failure
-        if: failure()
-        run: >-
-          curl -i -X POST
-          -H "Authorization: Bearer $GITHUB_TOKEN"
-          -H "Content-Type: application/json"
-          --data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
-          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
-      - name: Update status to success
-        if: success()
-        run: >-
-          curl -i -X POST
-          -H "Authorization: Bearer $GITHUB_TOKEN"
-          -H "Content-Type: application/json"
-          --data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
-          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
index dc510e5966..27c09b4187 100644
--- a/.github/workflows/fuzzing.yml
+++ b/.github/workflows/fuzzing.yml
@@ -10,18 +10,20 @@ jobs:
     steps:
       - name: Build Fuzzers
         id: build
-        uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
+        uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
         with:
           oss-fuzz-project-name: "prometheus"
           dry-run: false
       - name: Run Fuzzers
-        uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
+        uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
+        # Note: Regularly check for updates to the pinned commit hash at:
+        #   https://github.com/google/oss-fuzz/tree/master/infra/cifuzz/actions/run_fuzzers
         with:
           oss-fuzz-project-name: "prometheus"
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts
diff --git a/.github/workflows/prombench.yml b/.github/workflows/prombench.yml
index 6ee172662b..65d1d71917 100644
--- a/.github/workflows/prombench.yml
+++ b/.github/workflows/prombench.yml
@@ -2,6 +2,8 @@ on:
   repository_dispatch:
     types: [prombench_start, prombench_restart, prombench_stop]
 name: Prombench Workflow
+permissions:
+  contents: read
 env:
   AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
   CLUSTER_NAME: test-infra
@@ -15,6 +17,8 @@ env:
   PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
   PROVIDER: gke
   RELEASE: ${{ github.event.client_payload.RELEASE }}
+  BENCHMARK_VERSION: ${{ github.event.client_payload.BENCHMARK_VERSION }}
+  BENCHMARK_DIRECTORY: ${{ github.event.client_payload.BENCHMARK_DIRECTORY }}
   ZONE: europe-west3-a
 jobs:
   benchmark_start:
diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml
index 537e9abd84..fa8d2e5abe 100644
--- a/.github/workflows/repo_sync.yml
+++ b/.github/workflows/repo_sync.yml
@@ -13,7 +13,9 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index c82fa87a1e..c2335a8e46 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -21,12 +21,12 @@ jobs:
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
         with:
           persist-credentials: false
       - name: "Run analysis"
-        uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
+        uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # tag=v2.4.1
         with:
           results_file: results.sarif
           results_format: sarif
@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # tag=v4.6.2
         with:
           name: SARIF file
           path: results.sarif
@@ -45,6 +45,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # tag=v3.28.16 with: sarif_file: results.sarif diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..371d92a69a --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,31 @@ +name: Stale Check +on: + workflow_dispatch: {} + schedule: + - cron: '16 22 * * *' +permissions: + issues: write + pull-requests: write +jobs: + stale: + if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. + runs-on: ubuntu-latest + steps: + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + # opt out of defaults to avoid marking issues as stale and closing them + # https://github.com/actions/stale#days-before-close + # https://github.com/actions/stale#days-before-stale + days-before-stale: -1 + days-before-close: -1 + # Setting it to empty string to skip comments. + # https://github.com/actions/stale#stale-pr-message + # https://github.com/actions/stale#stale-issue-message + stale-pr-message: '' + stale-issue-message: '' + operations-per-run: 30 + # override days-before-stale, for only marking the pull requests as stale + days-before-pr-stale: 60 + stale-pr-label: stale + exempt-pr-labels: keepalive diff --git a/.gitignore b/.gitignore index e85d766b09..0d99305f69 100644 --- a/.gitignore +++ b/.gitignore @@ -22,7 +22,7 @@ benchmark.txt /documentation/examples/remote_storage/example_write_adapter/example_write_adapter npm_licenses.tar.bz2 -/web/ui/static/react +/web/ui/static /vendor /.build diff --git a/.golangci.yml b/.golangci.yml index e924fe3d5b..1a744b4142 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,146 +1,177 @@ -run: - timeout: 15m +formatters: + enable: + - gci + - gofumpt + - goimports + settings: + gci: + sections: + - standard + - default + - prefix(github.com/prometheus/prometheus) + gofumpt: + extra-rules: true + goimports: + local-prefixes: + - github.com/prometheus/prometheus -output: - sort-results: true +issues: + max-issues-per-linter: 0 + max-same-issues: 0 linters: + # Keep this list sorted alphabetically enable: - depguard - errorlint + - exptostd - gocritic - godot - - gofumpt - - goimports + - loggercheck - misspell - - nolintlint + - nilnesserr + # TODO(bwplotka): Enable once https://github.com/golangci/golangci-lint/issues/3228 is fixed. + # - nolintlint - perfsprint - predeclared - revive + - sloglint - testifylint - unconvert - unused - usestdlibvars - whitespace - - loggercheck -issues: - max-same-issues: 0 - exclude-files: - # Skip autogenerated files. - - ^.*\.(pb|y)\.go$ - exclude-dirs: - # Copied it from a different source - - storage/remote/otlptranslator/prometheusremotewrite - - storage/remote/otlptranslator/prometheus - exclude-rules: - - linters: - - gocritic - text: "appendAssign" - - path: _test.go - linters: - - errcheck - - path: "tsdb/head_wal.go" - linters: - - errorlint - - linters: - - godot - source: "^// ===" - - linters: - - perfsprint - text: "fmt.Sprintf can be replaced with string concatenation" -linters-settings: - depguard: + exclusions: + paths: + # Skip autogenerated files. 
+      - ^.*\.(pb|y)\.go$
     rules:
-      main:
-        deny:
-          - pkg: "sync/atomic"
-            desc: "Use go.uber.org/atomic instead of sync/atomic"
-          - pkg: "github.com/stretchr/testify/assert"
-            desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
-          - pkg: "github.com/go-kit/kit/log"
-            desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
-          - pkg: "io/ioutil"
-            desc: "Use corresponding 'os' or 'io' functions instead."
-          - pkg: "regexp"
-            desc: "Use github.com/grafana/regexp instead of regexp"
-          - pkg: "github.com/pkg/errors"
-            desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
-          - pkg: "gzip"
-            desc: "Use github.com/klauspost/compress instead of gzip"
-          - pkg: "zlib"
-            desc: "Use github.com/klauspost/compress instead of zlib"
-          - pkg: "golang.org/x/exp/slices"
-            desc: "Use 'slices' instead."
-  errcheck:
-    exclude-functions:
-      # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
-      - io.Copy
-      # The next two are used in HTTP handlers, any error is handled by the server itself.
-      - io.WriteString
-      - (net/http.ResponseWriter).Write
-      # No need to check for errors on server's shutdown.
-      - (*net/http.Server).Shutdown
-      # Never check for logger errors.
-      - (github.com/go-kit/log.Logger).Log
-      # Never check for rollback errors as Rollback() is called when a previous error was detected.
-      - (github.com/prometheus/prometheus/storage.Appender).Rollback
-  goimports:
-    local-prefixes: github.com/prometheus/prometheus
-  gofumpt:
-    extra-rules: true
-  perfsprint:
-    # Optimizes `fmt.Errorf`.
-    errorf: false
-  revive:
-    # By default, revive will enable only the linting rules that are named in the configuration file.
-    # So, it's needed to explicitly set in configuration all required rules.
-    # The following configuration enables all the rules from the defaults.toml
-    # https://github.com/mgechev/revive/blob/master/defaults.toml
-    rules:
-      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
-      - name: blank-imports
-      - name: context-as-argument
-        arguments:
-          # allow functions with test or bench signatures
-          - allowTypesBefore: "*testing.T,testing.TB"
-      - name: context-keys-type
-      - name: dot-imports
-      # A lot of false positives: incorrectly identifies channel draining as "empty code block".
-      # See https://github.com/mgechev/revive/issues/386
-      - name: empty-block
-        disabled: true
-      - name: error-naming
-      - name: error-return
-      - name: error-strings
-      - name: errorf
-      - name: exported
-      - name: increment-decrement
-      - name: indent-error-flow
-      - name: package-comments
-      - name: range
-      - name: receiver-naming
-      - name: redefines-builtin-id
-      - name: superfluous-else
-      - name: time-naming
-      - name: unexported-return
-      - name: unreachable-code
-      - name: unused-parameter
-        disabled: true
-      - name: var-declaration
-      - name: var-naming
-  testifylint:
-    disable:
-      - float-compare
-      - go-require
-    enable:
-      - bool-compare
-      - compares
-      - empty
-      - error-is-as
-      - error-nil
-      - expected-actual
-      - len
-      - require-error
-      - suite-dont-use-pkg
-      - suite-extra-assert-call
+      - linters:
+          - errcheck
+        # Taken from the default exclusions in v1.
+        text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+      - linters:
+          - govet
+        # We use many Seek methods that do not follow the usual pattern.
+        text: "stdmethods: method Seek.* should have signature Seek"
+      - linters:
+          - revive
+        # At some point, we stopped writing doc comments on exported symbols.
+        # TODO(beorn7): Maybe we should enforce this again? There are ~500 offenders right now.
+        text: exported (.+) should have comment( \(or a comment on this block\))? or be unexported
+      - linters:
+          - gocritic
+        text: "appendAssign"
+      - linters:
+          - errcheck
+        path: _test.go
+      - linters:
+          - errorlint
+        path: "tsdb/head_wal.go"
+      - linters:
+          - godot
+        source: "^// ==="
+    warn-unused: true
+  settings:
+    depguard:
+      rules:
+        main:
+          deny:
+            - pkg: "sync/atomic"
+              desc: "Use go.uber.org/atomic instead of sync/atomic"
+            - pkg: "github.com/stretchr/testify/assert"
+              desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
+            - pkg: "github.com/go-kit/kit/log"
+              desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+            - pkg: "io/ioutil"
+              desc: "Use corresponding 'os' or 'io' functions instead."
+            - pkg: "regexp"
+              desc: "Use github.com/grafana/regexp instead of regexp"
+            - pkg: "github.com/pkg/errors"
+              desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
+            - pkg: "gzip"
+              desc: "Use github.com/klauspost/compress instead of gzip"
+            - pkg: "zlib"
+              desc: "Use github.com/klauspost/compress instead of zlib"
+            - pkg: "golang.org/x/exp/slices"
+              desc: "Use 'slices' instead."
+    errcheck:
+      exclude-functions:
+        # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
+        - io.Copy
+        # The next two are used in HTTP handlers, any error is handled by the server itself.
+        - io.WriteString
+        - (net/http.ResponseWriter).Write
+        # No need to check for errors on server's shutdown.
+        - (*net/http.Server).Shutdown
+        # Never check for rollback errors as Rollback() is called when a previous error was detected.
+        - (github.com/prometheus/prometheus/storage.Appender).Rollback
+    perfsprint:
+      # Optimizes even if it requires an int or uint type cast.
+      int-conversion: true
+      # Optimizes into `err.Error()` even if it is only equivalent for non-nil errors.
+      err-error: true
+      # Optimizes `fmt.Errorf`.
+      errorf: true
+      # Optimizes `fmt.Sprintf` with only one argument.
+      sprintf1: true
+      # Optimizes into strings concatenation.
+      strconcat: false
+    revive:
+      # By default, revive will enable only the linting rules that are named in the configuration file.
+      # So, it's needed to explicitly enable all required rules here.
+      rules:
+        # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+        - name: blank-imports
+        - name: comment-spacings
+        - name: context-as-argument
+          arguments:
+            # Allow functions with test or bench signatures.
+            - allowTypesBefore: '*testing.T,testing.TB'
+        - name: context-keys-type
+        - name: dot-imports
+        - name: early-return
+          arguments:
+            - "preserveScope"
+        # A lot of false positives: incorrectly identifies channel draining as "empty code block".
+        # See https://github.com/mgechev/revive/issues/386
+        - name: empty-block
+          disabled: true
+        - name: error-naming
+        - name: error-return
+        - name: error-strings
+        - name: errorf
+        - name: exported
+        - name: increment-decrement
+        - name: indent-error-flow
+          arguments:
+            - "preserveScope"
+        - name: package-comments
+          # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them.
+          disabled: true
+        - name: range
+        - name: receiver-naming
+        - name: redefines-builtin-id
+        - name: superfluous-else
+          arguments:
+            - "preserveScope"
+        - name: time-naming
+        - name: unexported-return
+        - name: unreachable-code
+        - name: unused-parameter
+        - name: var-declaration
+        - name: var-naming
+    testifylint:
+      disable:
+        - float-compare
+        - go-require
+      enable-all: true
+
+output:
+  show-stats: false
+
+run:
+  timeout: 15m
+
+version: "2"
diff --git a/.promu.yml b/.promu.yml
index 0aa51d6d31..d16bceeed9 100644
--- a/.promu.yml
+++ b/.promu.yml
@@ -1,7 +1,7 @@
 go:
     # Whenever the Go version is updated here,
     # .github/workflows should also be updated.
-    version: 1.22
+    version: 1.24
 repository:
     path: github.com/prometheus/prometheus
 build:
@@ -14,10 +14,8 @@ build:
         all:
             - netgo
            - builtinassets
-            - stringlabels
         windows:
             - builtinassets
-            - stringlabels
     ldflags: |
         -X github.com/prometheus/common/version.Version={{.Version}}
         -X github.com/prometheus/common/version.Revision={{.Revision}}
@@ -28,8 +26,6 @@ tarball:
     # Whenever there are new files to include in the tarball,
     # remember to make sure the new files will be generated after `make build`.
     files:
-        - consoles
-        - console_libraries
         - documentation/examples/prometheus.yml
         - LICENSE
         - NOTICE
diff --git a/.yamllint b/.yamllint
index 1859cb624b..8d09c375fd 100644
--- a/.yamllint
+++ b/.yamllint
@@ -1,7 +1,7 @@
 ---
 extends: default
 ignore: |
-  ui/react-app/node_modules
+  **/node_modules
 
 rules:
   braces:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d5a91e9009..793a625bd4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,320 @@
 # Changelog
 
-## unreleased
+## main / unreleased
+
+* [FEATURE] OTLP receiver: Support promoting OTel scope name/version/schema URL/attributes as metric labels, enable via configuration parameter `otlp.promote_scope_metadata`. #16730 #16760
+
+## 3.4.2 / 2025-06-26
+
+* [BUGFIX] OTLP receiver: Fix default configuration not being respected if the `otlp:` block is unset in the config file. #16693
+
+## 3.4.1 / 2025-05-31
+
+* [BUGFIX] Parser: Add reproducer for a dangling-reference issue in parsers. #16633
+
+## 3.4.0 / 2025-05-17
+
+* [CHANGE] Config: Make setting out-of-order native histograms feature (`--enable-feature=ooo-native-histograms`) a no-op. Out-of-order native histograms are now always enabled when `out_of_order_time_window` is greater than zero and `--enable-feature=native-histograms` is set. #16207
+* [FEATURE] OTLP translate: Add feature flag for optionally translating OTel explicit bucket histograms into native histograms with custom buckets. #15850
+* [FEATURE] OTLP translate: Add option to receive OTLP metrics without translating names or attributes. #16441
+* [FEATURE] PromQL: allow arithmetic operations in durations in PromQL parser. #16249
+* [FEATURE] OTLP receiver: Add primitive support for ingesting OTLP delta metrics as-is. #16360
+* [ENHANCEMENT] PromQL: histogram_fraction for bucket histograms. #16095
+* [ENHANCEMENT] TSDB: add `prometheus_tsdb_wal_replay_unknown_refs_total` and `prometheus_tsdb_wbl_replay_unknown_refs_total` metrics to track unknown series references during WAL/WBL replay. #16166
+* [ENHANCEMENT] Scraping: Add config option for escaping scheme request. #16066
+* [ENHANCEMENT] Config: Add global config option for convert_classic_histograms_to_nhcb. #16226
+* [ENHANCEMENT] Alerting: make batch size configurable (`--alertmanager.notification-batch-size`). #16254
+* [PERF] Kubernetes SD: make endpointSlice discovery more efficient. #16433
+* [BUGFIX] Config: Fix auto-reload on changes to rule and scrape config files. #16340
+* [BUGFIX] Scraping: Skip native histogram series if ingestion is disabled. #16218
+* [BUGFIX] TSDB: Handle metadata/tombstones/exemplars for duplicate series during WAL replay. #16231
+* [BUGFIX] TSDB: Avoid processing exemplars outside the valid time range during WAL replay. #16242
+* [BUGFIX] Promtool: Add feature flags for PromQL features. #16443
+* [BUGFIX] Rules: correct logging of alert name & template data. #15093
+* [BUGFIX] PromQL: Use arithmetic mean for `histogram_stddev()` and `histogram_stdvar()`. #16444
+
+## 3.3.0 / 2025-04-15
+
+* [FEATURE] PromQL: Implement `idelta()` and `irate()` for native histograms. #15853
+* [ENHANCEMENT] Scaleway SD: Add `__meta_scaleway_instance_public_ipv4_addresses` and `__meta_scaleway_instance_public_ipv6_addresses` labels. #14228
+* [ENHANCEMENT] TSDB: Reduce locking while reloading blocks. #12920
+* [ENHANCEMENT] PromQL: Allow UTF-8 labels in `label_replace()`. #15974
+* [ENHANCEMENT] Promtool: `tsdb create-blocks-from openmetrics` can now read from a pipe. #16011
+* [ENHANCEMENT] Rules: Add support for anchors and aliases in rule files. #14957
+* [ENHANCEMENT] Dockerfile: Make `/prometheus` writable. #16073
+* [ENHANCEMENT] API: Include scrape pool name for dropped targets in `/api/v1/targets`. #16085
+* [ENHANCEMENT] UI: Improve time formatting and copying of selectors. #15999 #16165
+* [ENHANCEMENT] UI: Bring back vertical grid lines and graph legend series toggling instructions. #16163 #16164
+* [ENHANCEMENT] Mixin: The `cluster` label can be customized using `clusterLabel`. #15826
+* [PERF] TSDB: Optimize some operations on head chunks by taking shortcuts. #12659
+* [PERF] TSDB & Agent: Reduce memory footprint during WL replay. #15778
+* [PERF] Remote-Write: Reduce memory footprint during WAL replay. #16197
+* [PERF] API: Reduce memory footprint during header parsing. #16001
+* [PERF] Rules: Improve dependency evaluation, enabling better concurrency. #16039
+* [PERF] Scraping: Improve scraping performance for native histograms. #15731
+* [PERF] Scraping: Improve parsing of created timestamps. #16072
+* [BUGFIX] Scraping: Bump cache iteration after error to avoid false duplicate detections. #16174
+* [BUGFIX] Scraping: Skip native histograms series when ingestion is disabled. #16218
+* [BUGFIX] PromQL: Fix counter reset detection for native histograms. #15902 #15987
+* [BUGFIX] PromQL: Fix inconsistent behavior with an empty range. #15970
+* [BUGFIX] PromQL: Fix inconsistent annotation in `quantile_over_time()`. #16018
+* [BUGFIX] PromQL: Prevent `label_join()` from producing duplicates. #15975
+* [BUGFIX] PromQL: Ignore native histograms in `scalar()`, `sort()` and `sort_desc()`. #15964
+* [BUGFIX] PromQL: Fix annotations for binary operations between incompatible native histograms. #15895
+* [BUGFIX] Alerting: Consider alert relabeling when deciding whether alerts are dropped. #15979
+* [BUGFIX] Config: Set `GoGC` to the default value in case of an empty configuration. #16052
+* [BUGFIX] TSDB: Fix unknown series errors and potential data loss during WAL replay when inactive series are removed from the head and reappear before the next WAL checkpoint. #16060
+* [BUGFIX] Scaleway SD: The public IP will no longer be set to `__meta_scaleway_instance_public_ipv4` if it is an IPv6 address. #14228
+* [BUGFIX] UI: Display the correct value of Alerting rules' `keep_firing_for`. #16211
+
+## 3.2.1 / 2025-02-25
+
+* [BUGFIX] Don't send `Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061
+
+## 3.2.0 / 2025-02-17
+
+* [CHANGE] relabel: Replace actions can now use UTF-8 characters in `targetLabel` field. Note that `$` or `${}` will be expanded. This also applies to the `replacement` field for the `LabelMap` action. #15851
+* [CHANGE] rulefmt: Rule names can use UTF-8 characters, except `{` and `}` characters (due to common mistake checks). #15851
+* [FEATURE] remote/otlp: Add feature flag `otlp-deltatocumulative` to support conversion from delta to cumulative. #15165
+* [ENHANCEMENT] openstack SD: Discover Octavia loadbalancers. #15539
+* [ENHANCEMENT] scrape: Add metadata for automatic metrics to WAL for `metadata-wal-records` feature. #15837
+* [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719
+* [ENHANCEMENT] promtool: Add --ignore-unknown-fields option. #15706
+* [ENHANCEMENT] ui: Make "hide empty rules" and hide empty rules" persistent. #15807
+* [ENHANCEMENT] web/api: Add a limit parameter to `/query` and `/query_range`. #15552
+* [ENHANCEMENT] api: Add fields Node and ServerTime to `/status`. #15784
+* [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261
+* [BUGFIX] remotewrite2: Fix invalid metadata bug for metrics without metadata. #15829
+* [BUGFIX] remotewrite2: Fix the unit field propagation. #15825
+* [BUGFIX] scrape: Fix WAL metadata for histograms and summaries. #15832
+* [BUGFIX] ui: Merge duplicate "Alerts page settings" sections. #15810
+* [BUGFIX] PromQL: Fix `` functions with histograms. #15711
+
+## 3.1.0 / 2025-01-02
+
+ * [SECURITY] upgrade golang.org/x/crypto to address reported CVE-2024-45337. #15691
+ * [CHANGE] Notifier: Increment prometheus_notifications_errors_total by the number of affected alerts rather than per batch. #15428
+ * [CHANGE] API: list rules field "groupNextToken:omitempty" renamed to "groupNextToken". #15400
+ * [ENHANCEMENT] OTLP translate: keep identifying attributes in target_info. #15448
+ * [ENHANCEMENT] Paginate rule groups, add infinite scroll to rules within groups. #15677
+ * [ENHANCEMENT] TSDB: Improve calculation of space used by labels. #13880
+ * [ENHANCEMENT] Rules: new metric rule_group_last_rule_duration_sum_seconds. #15672
+ * [ENHANCEMENT] Observability: Export 'go_sync_mutex_wait_total_seconds_total' metric. #15339
+ * [ENHANCEMENT] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
+ * [PERF] Optimize `l=~".+"` matcher. #15474, #15684
+ * [PERF] TSDB: Cache all symbols for compaction. #15455
+ * [PERF] TSDB: MemPostings: keep a map of label values slices. #15426
+ * [PERF] Remote-Write: Remove interning hook. #15456
+ * [PERF] Scrape: optimize string manipulation for experimental native histograms with custom buckets. #15453
+ * [PERF] TSDB: reduce memory allocations. #15465, #15427
+ * [PERF] Storage: Implement limit in mergeGenericQuerier. #14489
+ * [PERF] TSDB: Optimize inverse matching. #14144
+ * [PERF] Regex: use stack memory for lowercase copy of string. #15210
+ * [PERF] TSDB: When deleting from postings index, pause to unlock and let readers read. #15242
+ * [BUGFIX] Main: Avoid possible segfault at exit. #15724
+ * [BUGFIX] Rules: Do not run rules concurrently if uncertain about dependencies. #15560
+ * [BUGFIX] PromQL: Adds test for `absent`, `absent_over_time` and `deriv` func with histograms. #15667
 * [BUGFIX] PromQL: Fix various bugs related to quoting UTF-8 characters. #15531
+ * [BUGFIX] Scrape: fix nil panic after scrape loop reload. #15563
+ * [BUGFIX] Remote-write: fix panic on repeated log message. #15562
+ * [BUGFIX] Scrape: reload would ignore always_scrape_classic_histograms and convert_classic_histograms_to_nhcb configs. #15489
+ * [BUGFIX] TSDB: fix data corruption in experimental native histograms. #15482
+ * [BUGFIX] PromQL: Ignore histograms in all time related functions. #15479
+ * [BUGFIX] OTLP receiver: Convert metric metadata. #15416
+ * [BUGFIX] PromQL: Fix `resets` function for histograms. #15527
+ * [BUGFIX] PromQL: Fix behaviour of `changes()` for mix of histograms and floats. #15469
+ * [BUGFIX] PromQL: Fix behaviour of some aggregations with histograms. #15432
+ * [BUGFIX] Allow quoted exemplar keys in openmetrics text format. #15260
+ * [BUGFIX] TSDB: fixes for rare conditions when loading write-behind-log (WBL). #15380
+ * [BUGFIX] `round()` function did not remove `__name__` label. #15250
+ * [BUGFIX] Promtool: analyze block shows metric name with 0 cardinality. #15438
+ * [BUGFIX] PromQL: Fix `count_values` for histograms. #15422
+ * [BUGFIX] PromQL: fix issues with comparison binary operations with `bool` modifier and native histograms. #15413
+ * [BUGFIX] PromQL: fix incorrect "native histogram ignored in aggregation" annotations. #15414
+ * [BUGFIX] PromQL: Corrects the behaviour of some operator and aggregators with Native Histograms. #15245
+ * [BUGFIX] TSDB: Always return unknown hint for first sample in non-gauge histogram chunk. #15343
+ * [BUGFIX] PromQL: Clamp functions: Ignore any points with native histograms. #15169
+ * [BUGFIX] TSDB: Fix race on stale values in headAppender. #15322
+ * [BUGFIX] UI: Fix selector / series formatting for empty metric names. #15340
+ * [BUGFIX] OTLP receiver: Allow colons in non-standard units. #15710
+
+## 3.0.1 / 2024-11-28
+
+The first bug fix release for Prometheus 3.
+
+* [BUGFIX] Promql: Make subqueries left open. #15431
+* [BUGFIX] Fix memory leak when query log is enabled. #15434
+* [BUGFIX] Support utf8 names on /v1/label/:name/values endpoint. #15399
+
+## 3.0.0 / 2024-11-14
+
+This release includes new features such as a brand new UI and UTF-8 support enabled by default. As this marks the first new major version in seven years, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users that want to upgrade, we recommend reading through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/).
+
+* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376
+* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373
+* [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136
+* [CHANGE] Remote-write: default enable_http2 to false. #15219
+* [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164
+* [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178
+* [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657
+* [CHANGE] Disallow configuring AM with the v1 api. #13883
+* [CHANGE] regexp `.` now matches all characters (performance improvement). #14505
+* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
+* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894
+* [CHANGE] Prometheus will not add or remove port numbers from the target address. `no-default-scrape-port` feature flag removed. #14160
+* [CHANGE] Logging: the format of log lines has changed a little, along with the adoption of Go's Structured Logging package. #14906
+* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion` is enabled. #14738
+* [CHANGE] Float literals and time durations being the same is now a stable feature. #15111
+* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
+* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
+* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
+* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
+* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705, #15258
+* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
+* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
+* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
+* [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
+* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
+* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384
+* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769, #15011
+* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
+* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing. #15196
+* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
+* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096
+* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082
+* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677
+* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546
+* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909
+* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929
+* [ENHANCEMENT] Consul SD: Support catalog filters. #11224
+* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
+* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975
+* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932
+* [PERF] TSDB: Grow postings by doubling. #14721
+* [PERF] Relabeling: Optimize adding a constant label pair. #12180
+* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357
+* [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341
+* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
+* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941
+* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251
+* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095
+* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
+* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854
+* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884
+* [BUGFIX] PromQL: Unary negation of native histograms. #14821
+* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). #15025
+* [BUGFIX] Autoreload: Reload invalid yaml files. #14947
+* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
+
+## 2.53.5 / 2025-06-30
+
+* [ENHANCEMENT] TSDB: Add backward compatibility with the upcoming TSDB block index v3. #16762
+* [BUGFIX] Top-level: Update GOGC before loading TSDB. #16521
+
+## 2.53.4 / 2025-03-18
+
+* [BUGFIX] Runtime: fix GOGC being set to 0 when Prometheus is installed with an empty prometheus.yml file, resulting in high CPU usage. #16090
+* [BUGFIX] Scrape: fix dropping valid metrics after previous scrape failed. #16220
+
+## 2.53.3 / 2024-11-04
+
+* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685, #14740
+
+## 2.53.2 / 2024-08-09
+
+Fix a bug where Prometheus would crash with a segmentation fault if a remote-read
+request accessed a block on disk at about the same time as TSDB created a new block.
+
+* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515,#14523
+
+## 2.55.1 / 2024-11-04
+
+* [BUGFIX] `round()` function did not remove `__name__` label. #15250
+
+## 2.55.0 / 2024-10-22
+
+* [FEATURE] PromQL: Add experimental `info` function. #14495
+* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
#14482, #14880, #14736, #14727
+* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
+* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
+* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734
+* [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200
+* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
+* [FEATURE] Promtool: New `tsdb create-blocks` option to add labels. #14403
+* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
+* [FEATURE] TSDB: Add `delayed-compaction` feature flag, for people running many Prometheus instances, to randomize compaction timing. #12532
+* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
+* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
+* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
+* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450
+* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
+* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when the given label is equal. #14655, #14985
+* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621
+* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
+* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
+* [ENHANCEMENT] API: Support multiple listening addresses. #14665
+* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948, #15120
+* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
+* [BUGFIX] PromQL: make `sort_by_label` stable. #14985
+* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
+* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
+* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810
+* [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766
+* [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716
+* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821
+* [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042
+
+## 2.54.1 / 2024-08-27
+
+* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps (mixing samples of the same series with and without timestamps is still rejected). #14685
+* [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. #14654
+* [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538
+* [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples.
#14514
+* [BUGFIX] PromQL: fix native histograms getting corrupted due to vector selector bug in range queries. #14605
+
+## 2.54.0 / 2024-08-09
+
+Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+This is experimental at this time and may still change.
+Remote-write v2 is enabled by default, but can be disabled via the `--web.remote-write-receiver.accepted-protobuf-messages` flag.
+
+* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437
+* [CHANGE] API: Split warnings from info annotations in API response. #14327
+* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395, #14427, #14444
+* [FEATURE] PromQL: add `limitk()` and `limit_ratio()` aggregation operators. #12503
+* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. `1_000_000` for 1 million. #12821
+* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138
+* [ENHANCEMENT] PromQL: use Kahan summation for `sum()`. #14074, #14362
+* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097
+* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438
+* [ENHANCEMENT] TSDB: Optimise seek within index. #14393
+* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
+* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286, #14286
+* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396, #14584
+* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843, #14368
+* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
+* [ENHANCEMENT] AWS SD: expose primary IPv6 addresses as `__meta_ec2_primary_ipv6_addresses`. #14156
+* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490
+* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312
+* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680, #14430
+* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094
+* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290
+* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194
+* [ENHANCEMENT] HTTP API: Add URL to message logged on error while sending response. #14209
+* [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts (introduced by #14396). #14584
+* [BUGFIX] CLI: escape `|` characters when generating docs. #14420
+* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454
+* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279
+* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341
+* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302
+* [BUGFIX] Rules: Fix rare panic on reload. #14366
+* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004
+* [BUGFIX] Remote-Write: reject samples with future timestamps.
#14304 +* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078 +* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174 +* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299 +* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515 ## 2.53.1 / 2024-07-10 @@ -35,6 +349,7 @@ This release changes the default for GOGC, the Go runtime control for the trade- ## 2.52.0 / 2024-05-07 * [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633 +* [CHANGE] Scrape: Multiple samples (even with different timestamps) are treated as duplicates during one scrape. * [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554 * [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935 * [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099 @@ -48,7 +363,7 @@ This release changes the default for GOGC, the Go runtime control for the trade- * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823 -* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838 +* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852 @@ -585,7 +900,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2 ## 2.33.0 / 2022-01-29 -* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121 +* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119 * [FEATURE] Config: Add `stripPort` template function. #10002 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045 @@ -822,7 +1137,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec. * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790 -* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723 +* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766 @@ -1748,7 +2063,7 @@ information, read the announcement blog post and migration guide. ## 1.7.0 / 2017-06-06 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy. 
-* [CHANGE] Properly ellide secrets in config. +* [CHANGE] Properly elide secrets in config. * [FEATURE] Add OpenStack service discovery. * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces. * [FEATURE] Add metric for discovered number of Alertmanagers. diff --git a/Dockerfile b/Dockerfile index b47f77dcd6..31e863d8a0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,27 +2,23 @@ ARG ARCH="amd64" ARG OS="linux" FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest LABEL maintainer="The Prometheus Authors " +LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus" ARG ARCH="amd64" ARG OS="linux" COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus COPY .build/${OS}-${ARCH}/promtool /bin/promtool COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml -COPY console_libraries/ /usr/share/prometheus/console_libraries/ -COPY consoles/ /usr/share/prometheus/consoles/ COPY LICENSE /LICENSE COPY NOTICE /NOTICE COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2 WORKDIR /prometheus -RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \ - chown -R nobody:nobody /etc/prometheus /prometheus +RUN chown -R nobody:nobody /etc/prometheus /prometheus && chmod g+w /prometheus USER nobody EXPOSE 9090 VOLUME [ "/prometheus" ] ENTRYPOINT [ "/bin/prometheus" ] CMD [ "--config.file=/etc/prometheus/prometheus.yml", \ - "--storage.tsdb.path=/prometheus", \ - "--web.console.libraries=/usr/share/prometheus/console_libraries", \ - "--web.console.templates=/usr/share/prometheus/consoles" ] + "--storage.tsdb.path=/prometheus" ] diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 3661ddaa0a..8d10a8fbca 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -2,7 +2,6 @@ General maintainers: * Bryan Boreham (bjboreham@gmail.com / @bboreham) -* Levi Harrison (levi@leviharrison.dev / @LeviHarrison) * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424) * Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie) @@ -10,16 +9,17 @@ Maintainers for specific parts of the codebase: * `cmd` * `promtool`: David Leadbeater ( / @dgl) * `discovery` + * `azure`: Jan-Otto Kröpke ( / @jkroepke) * `k8s`: Frederic Branczyk ( / @brancz) + * `stackit`: Jan-Otto Kröpke ( / @jkroepke) * `documentation` * `prometheus-mixin`: Matthias Loibl ( / @metalmatze) -* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), +* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), George Krajcsovits ( / @krajorama) * `storage` * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank) - * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) + * `otlptranslator`: Arthur Silva Sens ( / @ArthurSens), Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) - * `agent`: Robert Fratto ( / @rfratto) * `web` * `ui`: Julius Volz ( / @juliusv) * `module`: Augustin Husson ( @nexucis) diff --git a/Makefile b/Makefile index f2bb3fcb7a..0b5935de00 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,11 @@ include Makefile.common DOCKER_IMAGE_NAME ?= prometheus +# Only build UI if PREBUILT_ASSETS_STATIC_DIR is not set +ifdef PREBUILT_ASSETS_STATIC_DIR + SKIP_UI_BUILD = true +endif + .PHONY: update-npm-deps 
update-npm-deps: @echo ">> updating npm dependencies" @@ -42,13 +47,17 @@ upgrade-npm-deps: .PHONY: ui-bump-version ui-bump-version: - version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}" + version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}" cd web/ui && npm install git add "./web/ui/package-lock.json" "./**/package.json" .PHONY: ui-install ui-install: cd $(UI_PATH) && npm install + # The old React app has been separated from the npm workspaces setup to avoid + # issues with conflicting dependencies. This is a temporary solution until the + # new Mantine-based UI is fully integrated and the old app can be removed. + cd $(UI_PATH)/react-app && npm install .PHONY: ui-build ui-build: @@ -65,10 +74,30 @@ ui-test: .PHONY: ui-lint ui-lint: cd $(UI_PATH) && npm run lint + # The old React app has been separated from the npm workspaces setup to avoid + # issues with conflicting dependencies. This is a temporary solution until the + # new Mantine-based UI is fully integrated and the old app can be removed. + cd $(UI_PATH)/react-app && npm run lint .PHONY: assets +ifndef SKIP_UI_BUILD assets: ui-install ui-build +.PHONY: npm_licenses +npm_licenses: ui-install + @echo ">> bundling npm licenses" + rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses + ln -s . npm_licenses + find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=- + rm -f npm_licenses +else +assets: + @echo '>> skipping assets build, pre-built assets provided' + +npm_licenses: + @echo '>> skipping assets npm licenses, pre-built assets provided' +endif + .PHONY: assets-compress assets-compress: assets @echo '>> compressing assets' @@ -117,14 +146,6 @@ else test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version endif -.PHONY: npm_licenses -npm_licenses: ui-install - @echo ">> bundling npm licenses" - rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses - ln -s . npm_licenses - find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=- - rm -f npm_licenses - .PHONY: tarball tarball: npm_licenses common-tarball diff --git a/Makefile.common b/Makefile.common index e3da72ab47..4de21512ff 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.1 +GOLANGCI_LINT_VERSION ?= v2.1.5 +GOLANGCI_FMT_OPTS ?= # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -156,9 +157,13 @@ $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format -common-format: +common-format: $(GOLANGCI_LINT) @echo ">> formatting code" $(GO) fmt $(pkgs) +ifdef GOLANGCI_LINT + @echo ">> formatting code with golangci-lint" + $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) +endif .PHONY: common-vet common-vet: @@ -248,8 +253,8 @@ $(PROMU): cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) -.PHONY: proto -proto: +.PHONY: common-proto +common-proto: @echo ">> generating code from proto files" @./scripts/genproto.sh @@ -275,3 +280,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... 
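+
+# Example (illustrative, not part of the upstream targets' own docs): running
+# `make govulncheck` scans all Go packages for known vulnerabilities; the
+# install-govulncheck target below installs the tool on demand if it is missing.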
+ +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/README.md b/README.md index cd14ed2ecb..26262734c0 100644 --- a/README.md +++ b/README.md @@ -12,9 +12,10 @@ examples and guides.

[![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
+[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/prometheus/badge)](https://clomonitor.io/projects/cncf/prometheus)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
@@ -66,9 +67,9 @@ Prometheus will now be reachable at .
To build Prometheus from source code, You need:
-* Go [version 1.17 or greater](https://golang.org/doc/install).
-* NodeJS [version 16 or greater](https://nodejs.org/).
-* npm [version 7 or greater](https://www.npmjs.com/).
+* Go [version 1.22 or greater](https://golang.org/doc/install).
+* NodeJS [version 22 or greater](https://nodejs.org/).
+* npm [version 8 or greater](https://www.npmjs.com/).
Start by cloning the repository:
@@ -114,7 +115,7 @@ The Makefile provides several targets:
Prometheus is bundled with many service discovery plugins.
When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formated list of go
+file to disable some service discoveries. The file is a yaml-formatted list of go
import path that will be built into the Prometheus binary.
After you have changed the file, you
@@ -129,7 +130,6 @@ always, be extra careful when loading third party code.
### Building the Docker image
-The `make docker` target is designed for use in our CI system.
You can build a docker image locally with the following commands:
```bash
@@ -139,6 +139,9 @@ make npm_licenses
make common-docker-amd64
```
+The `make docker` target is intended only for use in our CI system and will not
+produce a fully working image when run locally.
+
## Using Prometheus as a Go Library
### Remote Write
@@ -157,8 +160,19 @@ This is experimental.
### Prometheus code base
In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
-Prometheus release number do not exactly match Go module releases. For the
-Prometheus v2.y.z releases, we are publishing equivalent v0.y.z tags.
+Prometheus release numbers do not exactly match Go module releases.
+
+For the
+Prometheus v3.y.z releases, we are publishing equivalent v0.3y.z tags. The y in v0.3y.z is always padded to two digits, with a leading zero if needed.
+
+Therefore, a user who wants to use Prometheus v3.0.0 as a library could do:
+
+```shell
+go get github.com/prometheus/prometheus@v0.300.0
+```
+
+For the
+Prometheus v2.y.z releases, we published the equivalent v0.y.z tags.
Therefore, a user that would want to use Prometheus v2.35.0 as a library could do: @@ -176,7 +190,7 @@ For more information on building, running, and developing on the React-based UI, ## More information -* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y. +* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v3.y.z will be displayed as v0.3y.z (the y in v0.3y.z is always padded to two digits, with a leading zero if needed), while v2.y.z will be displayed as v0.y.z. * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels. ## Contributing diff --git a/RELEASE.md b/RELEASE.md index 0d3f7456cd..a7032bd95e 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -5,60 +5,17 @@ This page describes the release process and the currently planned schedule for u ## Release schedule Release cadence of first pre-releases being cut is 6 weeks. +Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/release-2.55/RELEASE.md) for the v2 release series schedule. -| release series | date of first pre-release (year-month-day) | release shepherd | -|----------------|--------------------------------------------|---------------------------------------------| -| v2.4 | 2018-09-06 | Goutham Veeramachaneni (GitHub: @gouthamve) | -| v2.5 | 2018-10-24 | Frederic Branczyk (GitHub: @brancz) | -| v2.6 | 2018-12-05 | Simon Pasquier (GitHub: @simonpasquier) | -| v2.7 | 2019-01-16 | Goutham Veeramachaneni (GitHub: @gouthamve) | -| v2.8 | 2019-02-27 | Ganesh Vernekar (GitHub: @codesome) | -| v2.9 | 2019-04-10 | Brian Brazil (GitHub: @brian-brazil) | -| v2.10 | 2019-05-22 | Björn Rabenstein (GitHub: @beorn7) | -| v2.11 | 2019-07-03 | Frederic Branczyk (GitHub: @brancz) | -| v2.12 | 2019-08-14 | Julius Volz (GitHub: @juliusv) | -| v2.13 | 2019-09-25 | Krasi Georgiev (GitHub: @krasi-georgiev) | -| v2.14 | 2019-11-06 | Chris Marchbanks (GitHub: @csmarchbanks) | -| v2.15 | 2019-12-18 | Bartek Plotka (GitHub: @bwplotka) | -| v2.16 | 2020-01-29 | Callum Styan (GitHub: @cstyan) | -| v2.17 | 2020-03-11 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.18 | 2020-04-22 | Bartek Plotka (GitHub: @bwplotka) | -| v2.19 | 2020-06-03 | Ganesh Vernekar (GitHub: @codesome) | -| v2.20 | 2020-07-15 | Björn Rabenstein (GitHub: @beorn7) | -| v2.21 | 2020-08-26 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.22 | 2020-10-07 | Frederic Branczyk (GitHub: @brancz) | -| v2.23 | 2020-11-18 | Ganesh Vernekar (GitHub: @codesome) | -| v2.24 | 2020-12-30 | Björn Rabenstein (GitHub: @beorn7) | -| v2.25 | 2021-02-10 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.26 | 2021-03-24 | Bartek Plotka (GitHub: @bwplotka) | -| v2.27 | 2021-05-05 | Chris Marchbanks (GitHub: @csmarchbanks) | -| v2.28 | 2021-06-16 | Julius Volz (GitHub: @juliusv) | -| v2.29 | 2021-07-28 | Frederic Branczyk (GitHub: @brancz) | -| v2.30 | 2021-09-08 | Ganesh Vernekar (GitHub: @codesome) | -| v2.31 | 2021-10-20 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.32 | 2021-12-01 | Julius Volz (GitHub: @juliusv) | -| v2.33 | 2022-01-12 | Björn Rabenstein (GitHub: @beorn7) | -| v2.34 | 2022-02-23 | Chris Marchbanks (GitHub: @csmarchbanks) | -| v2.35 | 2022-04-06 | Augustin Husson (GitHub: @nexucis) | -| v2.36 | 2022-05-18 | Matthias Loibl (GitHub: @metalmatze) | -| v2.37 LTS | 
2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.38 | 2022-08-10 | Julius Volz (GitHub: @juliusv) | -| v2.39 | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome) | -| v2.40 | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome) | -| v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) | -| v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | -| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | -| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) | -| v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) | -| v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka) | -| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) | -| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) | -| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) | -| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) | -| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) | +| release series | date of first pre-release (year-month-day) | release shepherd | +|----------------|--------------------------------------------|------------------------------------| +| v3.0 | 2024-11-14 | Jan Fajerski (GitHub: @jan--f) | +| v3.1 | 2024-12-17 | Bryan Boreham (GitHub: @bboreham) | +| v3.2 | 2025-01-28 | Jan Fajerski (GitHub: @jan--f) | +| v3.3 | 2025-03-11 | Ayoub Mrini (Github: @machine424) | +| v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke)| +| v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) | +| v3.6 | 2025-07-15 | **volunteer welcome** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. @@ -180,19 +137,7 @@ git tag -s "${tag}" -m "${tag}" git push origin "${tag}" ``` -Go modules versioning requires strict use of semver. Because we do not commit to -avoid code-level breaking changes for the libraries between minor releases of -the Prometheus server, we use major version zero releases for the libraries. - -Tag the new library release via the following commands: - -```bash -tag="v$(sed s/2/0/ < VERSION)" -git tag -s "${tag}" -m "${tag}" -git push origin "${tag}" -``` - -Optionally, you can use this handy `.gitconfig` alias. +Alternatively, you can use this handy `.gitconfig` alias. ```ini [alias] @@ -203,12 +148,27 @@ Then release with `git tag-release`. Signing a tag with a GPG key is appreciated, but in case you can't add a GPG key to your Github account using the following [procedure](https://help.github.com/articles/generating-a-gpg-key/), you can replace the `-s` flag by `-a` flag of the `git tag` command to only annotate the tag without signing. -Once a tag is created, the release process through CircleCI will be triggered for this tag and Circle CI will draft the GitHub release using the `prombot` account. +Once a tag is created, the release process through Github Actions will be triggered for this tag and Github Actions will draft the GitHub release using the `prombot` account. Finally, wait for the build step for the tag to finish. The point here is to wait for tarballs to be uploaded to the Github release and the container images to be pushed to the Docker Hub and Quay.io. 
Once that has happened, click _Publish release_, which will make the release publicly visible and create a GitHub notification.
**Note:** for a release candidate version ensure the _This is a pre-release_ box is checked when drafting the release in the Github UI. The CI job should take care of this but it's a good idea to double check before clicking _Publish release_.
-### 3. Wrapping up
+### 3. Tag the library release
+
+Go modules versioning requires strict use of semver. Because we do not commit to
+avoid code-level breaking changes for the libraries between minor releases of
+the Prometheus server, we use major version zero releases for the libraries.
+
+Tagging the new library release works similarly to the normal release tagging,
+but without the subsequent build and publish steps. Use the following commands:
+
+```bash
+tag="v$(./scripts/get_module_version.sh)"
+git tag -s "${tag}" -m "${tag}"
+git push origin "${tag}"
+```
+
+### 4. Wrapping up
For release candidate versions (`v2.16.0-rc.0`), run the benchmark for 3 days using the `/prombench vX.Y.Z` command, `vX.Y.Z` being the latest stable patch release's tag of the previous minor release series, such as `v2.15.2`.
diff --git a/SECURITY-INSIGHTS.yml b/SECURITY-INSIGHTS.yml
new file mode 100644
index 0000000000..009b356214
--- /dev/null
+++ b/SECURITY-INSIGHTS.yml
@@ -0,0 +1,48 @@
+header:
+  schema-version: '1.0.0'
+  expiration-date: '2025-07-30T01:00:00.000Z'
+  last-updated: '2024-07-30'
+  last-reviewed: '2024-07-30'
+  project-url: https://github.com/prometheus/prometheus
+  changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
+  license: https://github.com/prometheus/prometheus/blob/main/LICENSE
+project-lifecycle:
+  status: active
+  bug-fixes-only: false
+  core-maintainers:
+    - https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md
+contribution-policy:
+  accepts-pull-requests: true
+  accepts-automated-pull-requests: true
+dependencies:
+  third-party-packages: true
+  dependencies-lists:
+    - https://github.com/prometheus/prometheus/blob/main/go.mod
+    - https://github.com/prometheus/prometheus/blob/main/web/ui/package.json
+  env-dependencies-policy:
+    policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management
+distribution-points:
+  - https://github.com/prometheus/prometheus/releases
+documentation:
+  - https://prometheus.io/docs/introduction/overview/
+security-contacts:
+  - type: email
+    value: prometheus-team@googlegroups.com
+security-testing:
+  - tool-type: sca
+    tool-name: Dependabot
+    tool-version: latest
+    integration:
+      ad-hoc: false
+      ci: true
+      before-release: true
+  - tool-type: sast
+    tool-name: CodeQL
+    tool-version: latest
+    integration:
+      ad-hoc: false
+      ci: true
+      before-release: true
+vulnerability-reporting:
+  accepts-vulnerability-reports: true
+  security-policy: https://github.com/prometheus/prometheus/security/policy
diff --git a/VERSION b/VERSION
index f419e2c6f1..4d9d11cf50 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.53.1
+3.4.2
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 1d844ddba6..ed7aa52c8a 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -18,17 +18,19 @@ import (
	"context"
	"errors"
	"fmt"
+	"log/slog"
	"math"
	"math/bits"
	"net"
	"net/http"
-	_ "net/http/pprof" // Comment this line to disable pprof endpoint.
	"net/url"
	"os"
	"os/signal"
	"path/filepath"
+	goregexp "regexp" //nolint:depguard // The Prometheus client library requires us to pass a regexp from this package.
"runtime" "runtime/debug" + "slices" "strconv" "strings" "sync" @@ -38,8 +40,6 @@ import ( "github.com/KimMachineGun/automemlimit/memlimit" "github.com/alecthomas/kingpin/v2" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/mwitkow/go-conntrack" "github.com/oklog/run" @@ -47,8 +47,8 @@ import ( "github.com/prometheus/client_golang/prometheus/collectors" versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" - promlogflag "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/promslog" + promslogflag "github.com/prometheus/common/promslog/flag" "github.com/prometheus/common/version" toolkit_web "github.com/prometheus/exporter-toolkit/web" "go.uber.org/atomic" @@ -58,8 +58,6 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/legacymanager" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -76,13 +74,53 @@ import ( "github.com/prometheus/prometheus/tracing" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/agent" - "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/logging" + "github.com/prometheus/prometheus/util/notifications" prom_runtime "github.com/prometheus/prometheus/util/runtime" "github.com/prometheus/prometheus/web" ) +// klogv1OutputCallDepth is the stack depth where we can find the origin of this call. +const klogv1OutputCallDepth = 6 + +// klogv1DefaultPrefixLength is the length of the log prefix that we have to strip out. +const klogv1DefaultPrefixLength = 53 + +// klogv1Writer is used in SetOutputBySeverity call below to redirect any calls +// to klogv1 to end up in klogv2. +// This is a hack to support klogv1 without use of go-kit/log. It is inspired +// by klog's upstream klogv1/v2 coexistence example: +// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go +type klogv1Writer struct{} + +// Write redirects klogv1 calls to klogv2. +// This is a hack to support klogv1 without use of go-kit/log. It is inspired +// by klog's upstream klogv1/v2 coexistence example: +// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go +func (kw klogv1Writer) Write(p []byte) (n int, err error) { + if len(p) < klogv1DefaultPrefixLength { + klogv2.InfoDepth(klogv1OutputCallDepth, string(p)) + return len(p), nil + } + + switch p[0] { + case 'I': + klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'W': + klogv2.WarningDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'E': + klogv2.ErrorDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + case 'F': + klogv2.FatalDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + default: + klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:])) + } + + return len(p), nil +} + var ( appName = "prometheus" @@ -103,6 +141,10 @@ var ( ) func init() { + // This can be removed when the legacy global mode is fully deprecated. 
+ //nolint:staticcheck + model.NameValidationScheme = model.UTF8Validation + prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_"))) var err error @@ -115,7 +157,7 @@ func init() { // serverOnlyFlag creates server-only kingpin flag. func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)). - PreAction(func(parseContext *kingpin.ParseContext) error { + PreAction(func(_ *kingpin.ParseContext) error { // This will be invoked only if flag is actually provided by user. serverOnlyFlags = append(serverOnlyFlags, "--"+name) return nil @@ -125,7 +167,7 @@ func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCl // agentOnlyFlag creates agent-only kingpin flag. func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { return app.Flag(name, fmt.Sprintf("%s Use with agent mode only.", help)). - PreAction(func(parseContext *kingpin.ParseContext) error { + PreAction(func(_ *kingpin.ParseContext) error { // This will be invoked only if flag is actually provided by user. agentOnlyFlags = append(agentOnlyFlags, "--"+name) return nil @@ -135,129 +177,152 @@ func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCla type flagConfig struct { configFile string - agentStoragePath string - serverStoragePath string - notifier notifier.Options - forGracePeriod model.Duration - outageTolerance model.Duration - resendDelay model.Duration - maxConcurrentEvals int64 - web web.Options - scrape scrape.Options - tsdb tsdbOptions - agent agentOptions - lookbackDelta model.Duration - webTimeout model.Duration - queryTimeout model.Duration - queryConcurrency int - queryMaxSamples int - RemoteFlushDeadline model.Duration + agentStoragePath string + serverStoragePath string + notifier notifier.Options + forGracePeriod model.Duration + outageTolerance model.Duration + resendDelay model.Duration + maxConcurrentEvals int64 + web web.Options + scrape scrape.Options + tsdb tsdbOptions + agent agentOptions + lookbackDelta model.Duration + webTimeout model.Duration + queryTimeout model.Duration + queryConcurrency int + queryMaxSamples int + RemoteFlushDeadline model.Duration + maxNotificationsSubscribers int - featureList []string - memlimitRatio float64 + enableAutoReload bool + autoReloadInterval model.Duration + + maxprocsEnable bool + memlimitEnable bool + memlimitRatio float64 + + featureList []string // These options are extracted from featureList // for ease of use. - enableExpandExternalLabels bool - enableNewSDManager bool - enablePerStepStats bool - enableAutoGOMAXPROCS bool - enableAutoGOMEMLIMIT bool - enableConcurrentRuleEval bool + enablePerStepStats bool + enableConcurrentRuleEval bool prometheusURL string corsRegexString string - promlogConfig promlog.Config + promqlEnableDelayedNameRemoval bool + + promslogConfig promslog.Config } // setFeatureListOptions sets the corresponding options from the featureList. -func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { +func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { for _, f := range c.featureList { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "remote-write-receiver": - c.web.EnableRemoteWriteReceiver = true - level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. 
Use --web.enable-remote-write-receiver.") - case "otlp-write-receiver": - c.web.EnableOTLPWriteReceiver = true - level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled") - case "expand-external-labels": - c.enableExpandExternalLabels = true - level.Info(logger).Log("msg", "Experimental expand-external-labels enabled") case "exemplar-storage": c.tsdb.EnableExemplarStorage = true - level.Info(logger).Log("msg", "Experimental in-memory exemplar storage enabled") + logger.Info("Experimental in-memory exemplar storage enabled") case "memory-snapshot-on-shutdown": c.tsdb.EnableMemorySnapshotOnShutdown = true - level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled") + logger.Info("Experimental memory snapshot on shutdown enabled") case "extra-scrape-metrics": c.scrape.ExtraMetrics = true - level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled") + logger.Info("Experimental additional scrape metrics enabled") case "metadata-wal-records": c.scrape.AppendMetadata = true - level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0") - case "new-service-discovery-manager": - c.enableNewSDManager = true - level.Info(logger).Log("msg", "Experimental service discovery manager") - case "agent": - agentMode = true - level.Info(logger).Log("msg", "Experimental agent mode enabled.") + logger.Info("Experimental metadata records in WAL enabled") case "promql-per-step-stats": c.enablePerStepStats = true - level.Info(logger).Log("msg", "Experimental per-step statistics reporting") - case "auto-gomaxprocs": - c.enableAutoGOMAXPROCS = true - level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota") - case "auto-gomemlimit": - c.enableAutoGOMEMLIMIT = true - level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit") + logger.Info("Experimental per-step statistics reporting") + case "auto-reload-config": + c.enableAutoReload = true + if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 { + c.autoReloadInterval, _ = model.ParseDuration("1s") + } + logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval) case "concurrent-rule-eval": c.enableConcurrentRuleEval = true - level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.") - case "no-default-scrape-port": - c.scrape.NoDefaultPort = true - level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.") + logger.Info("Experimental concurrent rule evaluation enabled.") case "promql-experimental-functions": parser.EnableExperimentalFunctions = true - level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") + logger.Info("Experimental PromQL functions enabled.") + case "promql-duration-expr": + parser.ExperimentalDurationExpr = true + logger.Info("Experimental duration expression parsing enabled.") case "native-histograms": c.tsdb.EnableNativeHistograms = true c.scrape.EnableNativeHistogramsIngestion = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols - level.Info(logger).Log("msg", "Experimental native histogram support enabled. 
Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + logger.Info("Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + case "ooo-native-histograms": + logger.Warn("This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o) case "created-timestamp-zero-ingestion": c.scrape.EnableCreatedTimestampZeroIngestion = true + c.web.CTZeroIngestionEnabled = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols - level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + case "delayed-compaction": + c.tsdb.EnableDelayedCompaction = true + logger.Info("Experimental delayed compaction is enabled.") + case "promql-delayed-name-removal": + c.promqlEnableDelayedNameRemoval = true + logger.Info("Experimental PromQL delayed name removal enabled.") case "": continue - case "promql-at-modifier", "promql-negative-offset": - level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o) + case "old-ui": + c.web.UseOldUI = true + logger.Info("Serving previous version of the Prometheus web UI.") + case "otlp-deltatocumulative": + c.web.ConvertOTLPDelta = true + logger.Info("Converting delta OTLP metrics to cumulative") + case "otlp-native-delta-ingestion": + // Experimental OTLP native delta ingestion. + // This currently just stores the raw delta value as-is with unknown metric type. Better typing and + // type-aware functions may come later. + // See proposal: https://github.com/prometheus/proposals/pull/48 + c.web.NativeOTLPDeltaIngestion = true + logger.Info("Enabling native ingestion of delta OTLP metrics, storing the raw sample values without conversion. WARNING: Delta support is in an early stage of development. The ingestion and querying process is likely to change over time.") + case "type-and-unit-labels": + c.scrape.EnableTypeAndUnitLabels = true + logger.Info("Experimental type and unit labels enabled") + case "use-uncached-io": + c.tsdb.UseUncachedIO = true + logger.Info("Experimental Uncached IO is enabled.") default: - level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o) + logger.Warn("Unknown option for --enable-feature", "option", o) } } } + if c.web.ConvertOTLPDelta && c.web.NativeOTLPDeltaIngestion { + return errors.New("cannot enable otlp-deltatocumulative and otlp-native-delta-ingestion features at the same time") + } + return nil } +// parseCompressionType parses the two compression-related configuration values and returns the CompressionType. 
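+// For example (illustrative): parseCompressionType(true, compression.Zstd)
+// returns compression.Zstd, while parseCompressionType(false, compression.Zstd)
+// returns compression.None, because the type is ignored when compression is off.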
+func parseCompressionType(compress bool, compressType compression.Type) compression.Type { + if compress { + return compressType + } + return compression.None +} + func main() { if os.Getenv("DEBUG") != "" { runtime.SetBlockProfileRate(20) runtime.SetMutexProfileFraction(20) } - var ( - oldFlagRetentionDuration model.Duration - newFlagRetentionDuration model.Duration - ) - // Unregister the default GoCollector, and reregister with our defaults. if prometheus.Unregister(collectors.NewGoCollector()) { prometheus.MustRegister( @@ -265,6 +330,7 @@ func main() { collectors.WithGoCollectorRuntimeMetrics( collectors.MetricsGC, collectors.MetricsScheduler, + collectors.GoRuntimeMetricsRule{Matcher: goregexp.MustCompile(`^/sync/mutex/wait/total:seconds$`)}, ), ), ) @@ -278,7 +344,7 @@ func main() { Registerer: prometheus.DefaultRegisterer, Gatherer: prometheus.DefaultGatherer, }, - promlogConfig: promlog.Config{}, + promslogConfig: promslog.Config{}, } a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout) @@ -290,9 +356,16 @@ func main() { a.Flag("config.file", "Prometheus configuration file path."). Default("prometheus.yml").StringVar(&cfg.configFile) - a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry."). - Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress) + a.Flag("config.auto-reload-interval", "Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes."). + Default("30s").SetValue(&cfg.autoReloadInterval) + a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated."). + Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses) + + a.Flag("auto-gomaxprocs", "Automatically set GOMAXPROCS to match Linux container CPU quota"). + Default("true").BoolVar(&cfg.maxprocsEnable) + a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit"). + Default("true").BoolVar(&cfg.memlimitEnable) a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory"). Default("0.9").FloatVar(&cfg.memlimitRatio) @@ -305,9 +378,12 @@ func main() { "Maximum duration before timing out read of the request, and closing idle connections."). Default("5m").SetValue(&cfg.webTimeout) - a.Flag("web.max-connections", "Maximum number of simultaneous connections."). + a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners."). Default("512").IntVar(&cfg.web.MaxConnections) + a.Flag("web.max-notifications-subscribers", "Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close."). + Default("16").IntVar(&cfg.maxNotificationsSubscribers) + a.Flag("web.external-url", "The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically."). PlaceHolder("").StringVar(&cfg.prometheusURL) @@ -334,6 +410,9 @@ func main() { a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. 
Supported values: %v", supportedRemoteWriteProtoMsgs.String())). Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs)) + a.Flag("web.enable-otlp-receiver", "Enable API endpoint accepting OTLP write requests."). + Default("false").BoolVar(&cfg.web.EnableOTLPWriteReceiver) + a.Flag("web.console.templates", "Path to the console template directory, available at /consoles."). Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath) @@ -364,11 +443,8 @@ func main() { "Size at which to split the tsdb WAL segment files. Example: 100MB"). Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.WALSegmentSize) - serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead."). - SetValue(&oldFlagRetentionDuration) - - serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms."). - SetValue(&newFlagRetentionDuration) + serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. If neither this flag nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms."). + SetValue(&cfg.tsdb.RetentionDuration) serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". Based on powers-of-2, so 1KB is 1024B."). BytesVar(&cfg.tsdb.MaxBytes) @@ -376,16 +452,18 @@ func main() { serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). Default("false").BoolVar(&cfg.tsdb.NoLockfile) - // TODO: Remove in Prometheus 3.0. - var b bool - serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now."). - Default("true").Hidden().BoolVar(&b) + serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks."). + Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction) - serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). - Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) + var ( + tsdbWALCompression bool + tsdbWALCompressionType string + ) + serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL. If false, the --storage.tsdb.wal-compression-type flag is ignored."). + Hidden().Default("true").BoolVar(&tsdbWALCompression) - serverOnlyFlag(a, "storage.tsdb.wal-compression-type", "Compression algorithm for the tsdb WAL."). - Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.tsdb.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + serverOnlyFlag(a, "storage.tsdb.wal-compression-type", "Compression algorithm for the tsdb WAL, used when --storage.tsdb.wal-compression is true."). 
+ Hidden().Default(compression.Snappy).EnumVar(&tsdbWALCompressionType, compression.Snappy, compression.Zstd) serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental."). Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize) @@ -393,6 +471,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk."). Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk) + serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled."). + Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent) + agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage."). Default("data-agent/").StringVar(&cfg.agentStoragePath) @@ -400,11 +481,15 @@ func main() { "Size at which to split WAL segment files. Example: 100MB"). Hidden().PlaceHolder("").BytesVar(&cfg.agent.WALSegmentSize) - agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL."). - Default("true").BoolVar(&cfg.agent.WALCompression) + var ( + agentWALCompression bool + agentWALCompressionType string + ) + agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL. If false, the --storage.agent.wal-compression-type flag is ignored."). + Default("true").BoolVar(&agentWALCompression) - agentOnlyFlag(a, "storage.agent.wal-compression-type", "Compression algorithm for the agent WAL."). - Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.agent.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + agentOnlyFlag(a, "storage.agent.wal-compression-type", "Compression algorithm for the agent WAL, used when --storage.agent.wal-compression is true."). + Hidden().Default(compression.Snappy).EnumVar(&agentWALCompressionType, compression.Snappy, compression.Zstd) agentOnlyFlag(a, "storage.agent.wal-truncate-frequency", "The frequency at which to truncate the WAL and remove old data."). @@ -454,12 +539,12 @@ func main() { serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications."). Default("10000").IntVar(&cfg.notifier.QueueCapacity) + serverOnlyFlag(a, "alertmanager.notification-batch-size", "The maximum number of notifications per batch to send to the Alertmanager."). + Default(strconv.Itoa(notifier.DefaultMaxBatchSize)).IntVar(&cfg.notifier.MaxBatchSize) + serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down."). Default("true").BoolVar(&cfg.notifier.DrainOnShutdown) - // TODO: Remove in Prometheus 3.0. - alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String() - serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation."). Default("5m").SetValue(&cfg.lookbackDelta) @@ -475,12 +560,14 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). 
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) - promlogflag.AddFlags(a, &cfg.promlogConfig) + a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) - a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { + promslogflag.AddFlags(a, &cfg.promslogConfig) + + a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(_ *kingpin.ParseContext) error { if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil { os.Exit(1) return err @@ -491,15 +578,21 @@ func main() { _, err := a.Parse(os.Args[1:]) if err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err)) + fmt.Fprintf(os.Stderr, "Error parsing command line arguments: %s\n", err) a.Usage(os.Args[1:]) os.Exit(2) } - logger := promlog.New(&cfg.promlogConfig) + logger := promslog.New(&cfg.promslogConfig) + slog.SetDefault(logger) + + notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) + cfg.web.NotificationsSub = notifs.Sub + cfg.web.NotificationsGetter = notifs.Get + notifs.AddNotification(notifications.StartingUp) if err := cfg.setFeatureListOptions(logger); err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) + fmt.Fprintf(os.Stderr, "Error parsing feature list: %s\n", err) os.Exit(1) } @@ -523,7 +616,7 @@ func main() { localStoragePath = cfg.agentStoragePath } - cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress) + cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddresses[0]) if err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("parse external URL %q: %w", cfg.prometheusURL, err)) os.Exit(2) @@ -535,28 +628,37 @@ func main() { os.Exit(2) } - if *alertmanagerTimeout != "" { - level.Warn(logger).Log("msg", "The flag --alertmanager.timeout has no effect and will be removed in the future.") - } - // Throw error for invalid config before starting other components. 
var cfgFile *config.Config - if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil { + if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, promslog.NewNopLogger()); err != nil { absPath, pathErr := filepath.Abs(cfg.configFile) if pathErr != nil { absPath = cfg.configFile } - level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) + logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } + // Get scrape configs to validate dynamically loaded scrape_config_files. + // They can change over time, but do the extra validation on startup for better experience. if _, err := cfgFile.GetScrapeConfigs(); err != nil { absPath, pathErr := filepath.Abs(cfg.configFile) if pathErr != nil { absPath = cfg.configFile } - level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) + logger.Error(fmt.Sprintf("Error loading dynamic scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } + + // Parse rule files to verify they exist and contain valid rules. + if err := rules.ParseFiles(cfgFile.RuleFiles); err != nil { + absPath, pathErr := filepath.Abs(cfg.configFile) + if pathErr != nil { + absPath = cfg.configFile + } + logger.Error(fmt.Sprintf("Error loading rule file patterns from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) + os.Exit(2) + } + if cfg.tsdb.EnableExemplarStorage { if cfgFile.StorageConfig.ExemplarsConfig == nil { cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig @@ -567,6 +669,32 @@ func main() { cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow } + // Set Go runtime parameters before we get too far into initialization. + updateGoGC(cfgFile, logger) + if cfg.maxprocsEnable { + l := func(format string, a ...interface{}) { + logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") + } + if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { + logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err) + } + } + + if cfg.memlimitEnable { + if _, err := memlimit.SetGoMemLimitWithOpts( + memlimit.WithRatio(cfg.memlimitRatio), + memlimit.WithProvider( + memlimit.ApplyFallback( + memlimit.FromCgroup, + memlimit.FromSystem, + ), + ), + memlimit.WithLogger(logger.With("component", "automemlimit")), + ); err != nil { + logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) + } + } + // Now that the validity of the config is established, set the config // success metrics accordingly, although the config isn't really loaded // yet. This will happen later (including setting these metrics again), @@ -585,20 +713,9 @@ func main() { cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") if !agentMode { - // Time retention settings. - if oldFlagRetentionDuration != 0 { - level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.") - cfg.tsdb.RetentionDuration = oldFlagRetentionDuration - } - - // When the new flag is set it takes precedence. 
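The GOMAXPROCS and GOMEMLIMIT setup that moves above uses go.uber.org/automaxprocs and github.com/KimMachineGun/automemlimit; a standalone sketch of the same calls (the 0.9 ratio is an assumption here, Prometheus takes it from a flag):

```go
package main

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/KimMachineGun/automemlimit/memlimit"
	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Pin GOMAXPROCS to the container CPU quota; the library reports through
	// a printf-style callback, adapted to slog as in the hunk above.
	if _, err := maxprocs.Set(maxprocs.Logger(func(format string, args ...interface{}) {
		logger.Info(fmt.Sprintf(format, args...), "component", "automaxprocs")
	})); err != nil {
		logger.Warn("Failed to set GOMAXPROCS automatically", "err", err)
	}

	// Derive GOMEMLIMIT from the cgroup memory limit, falling back to total
	// system memory when no cgroup limit is present.
	if _, err := memlimit.SetGoMemLimitWithOpts(
		memlimit.WithRatio(0.9), // assumed; Prometheus passes --auto-gomemlimit.ratio
		memlimit.WithProvider(memlimit.ApplyFallback(
			memlimit.FromCgroup,
			memlimit.FromSystem,
		)),
	); err != nil {
		logger.Warn("Failed to set GOMEMLIMIT automatically", "err", err)
	}
}
```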
- if newFlagRetentionDuration != 0 { - cfg.tsdb.RetentionDuration = newFlagRetentionDuration - } - if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 { cfg.tsdb.RetentionDuration = defaultRetentionDuration - level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) + logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) } // Check for overflows. This limits our max retention to 100y. @@ -608,7 +725,7 @@ func main() { panic(err) } cfg.tsdb.RetentionDuration = y - level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String()) + logger.Warn("Time retention value is too high. Limiting to: " + y.String()) } // Max block size settings. @@ -624,16 +741,23 @@ func main() { cfg.tsdb.MaxBlockDuration = maxBlockDuration } + + // Delayed compaction checks + if cfg.tsdb.EnableDelayedCompaction && (cfg.tsdb.CompactionDelayMaxPercent > 100 || cfg.tsdb.CompactionDelayMaxPercent <= 0) { + logger.Warn("The --storage.tsdb.delayed-compaction.max-percent should have a value between 1 and 100. Using default", "default", tsdb.DefaultCompactionDelayMaxPercent) + cfg.tsdb.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent + } + + cfg.tsdb.WALCompressionType = parseCompressionType(tsdbWALCompression, tsdbWALCompressionType) + } else { + cfg.agent.WALCompressionType = parseCompressionType(agentWALCompression, agentWALCompressionType) } noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{} noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval) - // Above level 6, the k8s client would log bearer tokens in clear-text. - klog.ClampLevel(6) - klog.SetLogger(log.With(logger, "component", "k8s_client_runtime")) - klogv2.ClampLevel(6) - klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime")) + klogv2.SetSlogLogger(logger.With("component", "k8s_client_runtime")) + klog.SetOutputBySeverity("INFO", klogv1Writer{}) modeAppName := "Prometheus Server" mode := "server" @@ -642,20 +766,22 @@ func main() { mode = "agent" } - level.Info(logger).Log("msg", "Starting "+modeAppName, "mode", mode, "version", version.Info()) + logger.Info("Starting "+modeAppName, "mode", mode, "version", version.Info()) if bits.UintSize < 64 { - level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH) + logger.Warn("This Prometheus binary has not been compiled for a 64-bit architecture. 
Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH) } - level.Info(logger).Log("build_context", version.BuildContext()) - level.Info(logger).Log("host_details", prom_runtime.Uname()) - level.Info(logger).Log("fd_limits", prom_runtime.FdLimits()) - level.Info(logger).Log("vm_limits", prom_runtime.VMLimits()) + logger.Info("operational information", + "build_context", version.BuildContext(), + "host_details", prom_runtime.Uname(), + "fd_limits", prom_runtime.FdLimits(), + "vm_limits", prom_runtime.VMLimits(), + ) var ( localStorage = &readyStorage{stats: tsdb.NewDBStats()} scraper = &readyScrapeManager{} - remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) + remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) @@ -663,12 +789,12 @@ func main() { ctxWeb, cancelWeb = context.WithCancel(context.Background()) ctxRule = context.Background() - notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) + notifierManager = notifier.NewManager(&cfg.notifier, logger.With("component", "notifier")) ctxScrape, cancelScrape = context.WithCancel(context.Background()) ctxNotify, cancelNotify = context.WithCancel(context.Background()) - discoveryManagerScrape discoveryManager - discoveryManagerNotify discoveryManager + discoveryManagerScrape *discovery.Manager + discoveryManagerNotify *discovery.Manager ) // Kubernetes client metrics are used by Kubernetes SD. @@ -678,62 +804,37 @@ func main() { // they are not specific to an SD instance. 
err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer) if err != nil { - level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err) + logger.Error("failed to register Kubernetes client metrics", "err", err) os.Exit(1) } sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer) if err != nil { - level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) + logger.Error("failed to register service discovery metrics", "err", err) os.Exit(1) } - if cfg.enableNewSDManager { - { - discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } + discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) + if discoveryManagerScrape == nil { + logger.Error("failed to create a discovery manager scrape") + os.Exit(1) + } - { - discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } - } else { - { - discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } - - { - discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } + discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) + if discoveryManagerNotify == nil { + logger.Error("failed to create a discovery manager notify") + os.Exit(1) } scrapeManager, err := scrape.NewManager( &cfg.scrape, - log.With(logger, "component", "scrape manager"), + logger.With("component", "scrape manager"), + logging.NewJSONFileLogger, fanoutStorage, prometheus.DefaultRegisterer, ) if err != nil { - level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err) + logger.Error("failed to create a scrape manager", "err", err) os.Exit(1) } @@ -744,43 +845,22 @@ func main() { ruleManager *rules.Manager ) - if cfg.enableAutoGOMAXPROCS { - l := func(format string, a ...interface{}) { - level.Info(logger).Log("component", "automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...)) - } - if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { - level.Warn(logger).Log("component", "automaxprocs", "msg", "Failed to set GOMAXPROCS automatically", "err", err) - } - } - - if cfg.enableAutoGOMEMLIMIT { - if _, err := memlimit.SetGoMemLimitWithOpts( - memlimit.WithRatio(cfg.memlimitRatio), - memlimit.WithProvider( - 
memlimit.ApplyFallback( - memlimit.FromCgroup, - memlimit.FromSystem, - ), - ), - ); err != nil { - level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) - } - } - if !agentMode { opts := promql.EngineOpts{ - Logger: log.With(logger, "component", "query engine"), + Logger: logger.With("component", "query engine"), Reg: prometheus.DefaultRegisterer, MaxSamples: cfg.queryMaxSamples, Timeout: time.Duration(cfg.queryTimeout), - ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")), + ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, logger.With("component", "activeQueryTracker")), LookbackDelta: time.Duration(cfg.lookbackDelta), NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, // EnableAtModifier and EnableNegativeOffset have to be // always on for regular PromQL as of Prometheus v2.33. - EnableAtModifier: true, - EnableNegativeOffset: true, - EnablePerStepStats: cfg.enablePerStepStats, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: cfg.enablePerStepStats, + EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval, + EnableTypeAndUnitLabels: cfg.scrape.EnableTypeAndUnitLabels, } queryEngine = promql.NewEngine(opts) @@ -793,7 +873,7 @@ func main() { Context: ctxRule, ExternalURL: cfg.web.ExternalURL, Registerer: prometheus.DefaultRegisterer, - Logger: log.With(logger, "component", "rule manager"), + Logger: logger.With("component", "rule manager"), OutageTolerance: time.Duration(cfg.outageTolerance), ForGracePeriod: time.Duration(cfg.forGracePeriod), ResendDelay: time.Duration(cfg.resendDelay), @@ -844,7 +924,7 @@ func main() { } // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager. - webHandler := web.New(log.With(logger, "component", "web"), &cfg.web) + webHandler := web.New(logger.With("component", "web"), &cfg.web) // Monitor outgoing connections on default transport with conntrack. http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc( @@ -969,15 +1049,15 @@ func main() { }) } - listener, err := webHandler.Listener() + listeners, err := webHandler.Listeners() if err != nil { - level.Error(logger).Log("msg", "Unable to start web listener", "err", err) + logger.Error("Unable to start web listener", "err", err) os.Exit(1) } err = toolkit_web.Validate(*webConfig) if err != nil { - level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err) + logger.Error("Unable to validate web configuration file", "err", err) os.Exit(1) } @@ -992,18 +1072,19 @@ func main() { // Don't forget to release the reloadReady channel so that waiting blocks can exit normally. 
select { case sig := <-term: - level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String()) + logger.Warn("Received an OS signal, exiting gracefully...", "signal", sig.String()) reloadReady.Close() case <-webHandler.Quit(): - level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...") + logger.Warn("Received termination request via web service, exiting gracefully...") case <-cancel: reloadReady.Close() } return nil }, - func(err error) { + func(_ error) { close(cancel) - webHandler.SetReady(false) + webHandler.SetReady(web.Stopping) + notifs.AddNotification(notifications.ShuttingDown) }, ) } @@ -1012,11 +1093,11 @@ func main() { g.Add( func() error { err := discoveryManagerScrape.Run() - level.Info(logger).Log("msg", "Scrape discovery manager stopped") + logger.Info("Scrape discovery manager stopped") return err }, - func(err error) { - level.Info(logger).Log("msg", "Stopping scrape discovery manager...") + func(_ error) { + logger.Info("Stopping scrape discovery manager...") cancelScrape() }, ) @@ -1026,11 +1107,11 @@ func main() { g.Add( func() error { err := discoveryManagerNotify.Run() - level.Info(logger).Log("msg", "Notify discovery manager stopped") + logger.Info("Notify discovery manager stopped") return err }, - func(err error) { - level.Info(logger).Log("msg", "Stopping notify discovery manager...") + func(_ error) { + logger.Info("Stopping notify discovery manager...") cancelNotify() }, ) @@ -1043,7 +1124,7 @@ func main() { ruleManager.Run() return nil }, - func(err error) { + func(_ error) { ruleManager.Stop() }, ) @@ -1059,15 +1140,15 @@ func main() { <-reloadReady.C err := scrapeManager.Run(discoveryManagerScrape.SyncCh()) - level.Info(logger).Log("msg", "Scrape manager stopped") + logger.Info("Scrape manager stopped") return err }, - func(err error) { + func(_ error) { // Scrape manager needs to be stopped before closing the local TSDB // so that it doesn't try to write samples to a closed storage. // We should also wait for rule manager to be fully stopped to ensure // we don't trigger any false positive alerts for rules using absent(). 
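The g.Add pairs in these hunks follow the oklog/run actor model: each actor is an execute/interrupt pair, and once any execute function returns, the group invokes every interrupt and g.Run returns the first error. A reduced sketch:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/oklog/run"
)

func main() {
	var g run.Group

	// Actor 1: block until a termination signal or until cancelled.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)
	cancel := make(chan struct{})
	g.Add(
		func() error {
			select {
			case sig := <-term:
				fmt.Println("received signal, exiting gracefully:", sig)
			case <-cancel:
			}
			return nil
		},
		func(_ error) { close(cancel) },
	)

	// Actor 2: a stand-in for a manager goroutine; its interrupt function
	// unblocks it when any other actor exits first.
	done := make(chan struct{})
	g.Add(
		func() error { <-done; return nil },
		func(_ error) { close(done) },
	)

	// Runs until Ctrl-C; every interrupt function fires exactly once.
	if err := g.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```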
- level.Info(logger).Log("msg", "Stopping scrape manager...") + logger.Info("Stopping scrape manager...") scrapeManager.Stop() }, ) @@ -1080,7 +1161,7 @@ func main() { tracingManager.Run() return nil }, - func(err error) { + func(_ error) { tracingManager.Stop() }, ) @@ -1093,6 +1174,23 @@ func main() { hup := make(chan os.Signal, 1) signal.Notify(hup, syscall.SIGHUP) cancel := make(chan struct{}) + + var checksum string + if cfg.enableAutoReload { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { + logger.Error("Failed to generate initial checksum for configuration file", "err", err) + } + } + + callback := func(success bool) { + if success { + notifs.DeleteNotification(notifications.ConfigurationUnsuccessful) + return + } + notifs.AddNotification(notifications.ConfigurationUnsuccessful) + } + g.Add( func() error { <-reloadReady.C @@ -1100,22 +1198,51 @@ func main() { for { select { case <-hup: - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { - level.Error(logger).Log("msg", "Error reloading config", "err", err) + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { + logger.Error("Error reloading config", "err", err) + } else if cfg.enableAutoReload { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { + logger.Error("Failed to generate checksum during configuration reload", "err", err) + } } case rc := <-webHandler.Reload(): - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { - level.Error(logger).Log("msg", "Error reloading config", "err", err) + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { + logger.Error("Error reloading config", "err", err) rc <- err } else { rc <- nil + if cfg.enableAutoReload { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { + logger.Error("Failed to generate checksum during configuration reload", "err", err) + } + } + } + case <-time.Tick(time.Duration(cfg.autoReloadInterval)): + if !cfg.enableAutoReload { + continue + } + currentChecksum, err := config.GenerateChecksum(cfg.configFile) + if err != nil { + checksum = currentChecksum + logger.Error("Failed to generate checksum during configuration reload", "err", err) + } else if currentChecksum == checksum { + continue + } + logger.Info("Configuration file change detected, reloading the configuration.") + + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { + logger.Error("Error reloading config", "err", err) + } else { + checksum = currentChecksum } case <-cancel: return nil } } }, - func(err error) { + func(_ error) { // Wait for any in-progress reloads to complete to avoid // reloading things after they have been shutdown. 
cancel <- struct{}{} @@ -1135,18 +1262,19 @@ func main() { return nil } - if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil { + if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil { return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err) } reloadReady.Close() - webHandler.SetReady(true) - level.Info(logger).Log("msg", "Server is ready to receive web requests.") + webHandler.SetReady(web.Ready) + notifs.DeleteNotification(notifications.StartingUp) + logger.Info("Server is ready to receive web requests.") <-cancel return nil }, - func(err error) { + func(_ error) { close(cancel) }, ) @@ -1157,7 +1285,7 @@ func main() { cancel := make(chan struct{}) g.Add( func() error { - level.Info(logger).Log("msg", "Starting TSDB ...") + logger.Info("Starting TSDB ...") if cfg.tsdb.WALSegmentSize != 0 { if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 { return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB") @@ -1176,20 +1304,20 @@ func main() { switch fsType := prom_runtime.Statfs(localStoragePath); fsType { case "NFS_SUPER_MAGIC": - level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") + logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType) default: - level.Info(logger).Log("fs_type", fsType) + logger.Info("filesystem information", "fs_type", fsType) } - level.Info(logger).Log("msg", "TSDB started") - level.Debug(logger).Log("msg", "TSDB options", + logger.Info("TSDB started") + logger.Debug("TSDB options", "MinBlockDuration", cfg.tsdb.MinBlockDuration, "MaxBlockDuration", cfg.tsdb.MaxBlockDuration, "MaxBytes", cfg.tsdb.MaxBytes, "NoLockfile", cfg.tsdb.NoLockfile, "RetentionDuration", cfg.tsdb.RetentionDuration, "WALSegmentSize", cfg.tsdb.WALSegmentSize, - "WALCompression", cfg.tsdb.WALCompression, + "WALCompressionType", cfg.tsdb.WALCompressionType, ) startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000) @@ -1199,9 +1327,9 @@ func main() { <-cancel return nil }, - func(err error) { + func(_ error) { if err := fanoutStorage.Close(); err != nil { - level.Error(logger).Log("msg", "Error stopping storage", "err", err) + logger.Error("Error stopping storage", "err", err) } close(cancel) }, @@ -1213,7 +1341,7 @@ func main() { cancel := make(chan struct{}) g.Add( func() error { - level.Info(logger).Log("msg", "Starting WAL storage ...") + logger.Info("Starting WAL storage ...") if cfg.agent.WALSegmentSize != 0 { if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 { return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB") @@ -1232,15 +1360,15 @@ func main() { switch fsType := prom_runtime.Statfs(localStoragePath); fsType { case "NFS_SUPER_MAGIC": - level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. 
Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
+ logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType)
 default:
- level.Info(logger).Log("fs_type", fsType)
+ logger.Info("filesystem information", "fs_type", fsType)
 }
- level.Info(logger).Log("msg", "Agent WAL storage started")
- level.Debug(logger).Log("msg", "Agent WAL storage options",
+ logger.Info("Agent WAL storage started")
+ logger.Debug("Agent WAL storage options",
 "WALSegmentSize", cfg.agent.WALSegmentSize,
- "WALCompression", cfg.agent.WALCompression,
+ "WALCompressionType", cfg.agent.WALCompressionType,
 "StripeSize", cfg.agent.StripeSize,
 "TruncateFrequency", cfg.agent.TruncateFrequency,
 "MinWALTime", cfg.agent.MinWALTime,
@@ -1254,9 +1382,9 @@ func main() {
 <-cancel
 return nil
 },
- func(e error) {
+ func(_ error) {
 if err := fanoutStorage.Close(); err != nil {
- level.Error(logger).Log("msg", "Error stopping storage", "err", err)
+ logger.Error("Error stopping storage", "err", err)
 }
 close(cancel)
 },
@@ -1266,12 +1394,12 @@ func main() {
 // Web handler.
 g.Add(
 func() error {
- if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil {
+ if err := webHandler.Run(ctxWeb, listeners, *webConfig); err != nil {
 return fmt.Errorf("error starting web server: %w", err)
 }
 return nil
 },
- func(err error) {
+ func(_ error) {
 cancelWeb()
 },
 )
@@ -1290,25 +1418,27 @@ func main() {
 <-reloadReady.C
 notifierManager.Run(discoveryManagerNotify.SyncCh())
- level.Info(logger).Log("msg", "Notifier manager stopped")
+ logger.Info("Notifier manager stopped")
 return nil
 },
- func(err error) {
+ func(_ error) {
 notifierManager.Stop()
 },
 )
 }
- if err := g.Run(); err != nil {
- level.Error(logger).Log("err", err)
- os.Exit(1)
- }
- level.Info(logger).Log("msg", "See you next time!")
+ func() { // This function exists so the top of the stack is named 'main.main.funcxxx' and not 'oklog'.
+ if err := g.Run(); err != nil { + logger.Error("Fatal error", "err", err) + os.Exit(1) + } + }() + logger.Info("See you next time!") } -func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) { +func openDBWithMetrics(dir string, logger *slog.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) { db, err := tsdb.Open( dir, - log.With(logger, "component", "tsdb"), + logger.With("component", "tsdb"), reg, opts, stats, @@ -1361,21 +1491,23 @@ type reloader struct { reloader func(*config.Config) error } -func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { +func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) { start := time.Now() - timings := []interface{}{} - level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) + timingsLogger := logger + logger.Info("Loading configuration file", "filename", filename) defer func() { if err == nil { configSuccess.Set(1) configSuccessTime.SetToCurrentTime() + callback(true) } else { configSuccess.Set(0) + callback(false) } }() - conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger) + conf, err := config.LoadFile(filename, agentMode, logger) if err != nil { return fmt.Errorf("couldn't load configuration (--config.file=%q): %w", filename, err) } @@ -1390,18 +1522,26 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b for _, rl := range rls { rstart := time.Now() if err := rl.reloader(conf); err != nil { - level.Error(logger).Log("msg", "Failed to apply configuration", "err", err) + logger.Error("Failed to apply configuration", "err", err) failed = true } - timings = append(timings, rl.name, time.Since(rstart)) + timingsLogger = timingsLogger.With((rl.name), time.Since(rstart)) } if failed { return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) } + updateGoGC(conf, logger) + + noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) + timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)) + return nil +} + +func updateGoGC(conf *config.Config, logger *slog.Logger) { oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC) if oldGoGC != conf.Runtime.GoGC { - level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) + logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) } // Write the new setting out to the ENV var for runtime API output. if conf.Runtime.GoGC >= 0 { @@ -1409,11 +1549,6 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b } else { os.Setenv("GOGC", "off") } - - noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) - l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)} - level.Info(logger).Log(append(l, timings...)...) 
- return nil } func startsOrEndsWithQuote(s string) bool { @@ -1566,23 +1701,30 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender { type notReadyAppender struct{} -func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { +// SetOptions does nothing in this appender implementation. +func (n notReadyAppender) SetOptions(_ *storage.AppendOptions) {} + +func (n notReadyAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { +func (n notReadyAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + return 0, tsdb.ErrNotReady +} + +func (n notReadyAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } @@ -1667,7 +1809,7 @@ func (s *readyStorage) WALReplayStatus() (tsdb.WALReplayStatus, error) { } // ErrNotReady is returned if the underlying scrape manager is not ready yet. -var ErrNotReady = errors.New("Scrape manager not ready") +var ErrNotReady = errors.New("scrape manager not ready") // ReadyScrapeManager allows a scrape manager to be retrieved. Even if it's set at a later point in time. 
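On the reloadConfig change above: per-reloader timings are now accumulated as attributes on a derived slog.Logger rather than spliced into one Log call from an []interface{} slice. Reduced to its core:

```go
package main

import (
	"log/slog"
	"os"
	"time"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	start := time.Now()
	timingsLogger := logger
	for _, name := range []string{"remote_storage", "web_handler", "rules"} {
		rstart := time.Now()
		// ... rl.reloader(conf) would run here ...
		timingsLogger = timingsLogger.With(name, time.Since(rstart))
	}
	// One record carrying every accumulated timing attribute.
	timingsLogger.Info("Completed loading of configuration file", "totalDuration", time.Since(start))
}
```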
type readyScrapeManager struct { @@ -1703,8 +1845,7 @@ type tsdbOptions struct { RetentionDuration model.Duration MaxBytes units.Base2Bytes NoLockfile bool - WALCompression bool - WALCompressionType string + WALCompressionType compression.Type HeadChunksWriteQueueSize int SamplesPerChunk int StripeSize int @@ -1715,6 +1856,10 @@ type tsdbOptions struct { MaxExemplars int64 EnableMemorySnapshotOnShutdown bool EnableNativeHistograms bool + EnableDelayedCompaction bool + CompactionDelayMaxPercent int + EnableOverlappingCompaction bool + UseUncachedIO bool } func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { @@ -1724,7 +1869,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { RetentionDuration: int64(time.Duration(opts.RetentionDuration) / time.Millisecond), MaxBytes: int64(opts.MaxBytes), NoLockfile: opts.NoLockfile, - WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), + WALCompression: opts.WALCompressionType, HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, SamplesPerChunk: opts.SamplesPerChunk, StripeSize: opts.StripeSize, @@ -1735,7 +1880,10 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown, EnableNativeHistograms: opts.EnableNativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, - EnableOverlappingCompaction: true, + EnableDelayedCompaction: opts.EnableDelayedCompaction, + CompactionDelayMaxPercent: opts.CompactionDelayMaxPercent, + EnableOverlappingCompaction: opts.EnableOverlappingCompaction, + UseUncachedIO: opts.UseUncachedIO, } } @@ -1743,8 +1891,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { // as agent.Option fields are unit agnostic (time). type agentOptions struct { WALSegmentSize units.Base2Bytes - WALCompression bool - WALCompressionType string + WALCompressionType compression.Type StripeSize int TruncateFrequency model.Duration MinWALTime, MaxWALTime model.Duration @@ -1758,7 +1905,7 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option } return agent.Options{ WALSegmentSize: int(opts.WALSegmentSize), - WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), + WALCompression: opts.WALCompressionType, StripeSize: opts.StripeSize, TruncateFrequency: time.Duration(opts.TruncateFrequency), MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), @@ -1768,15 +1915,6 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option } } -// discoveryManager interfaces the discovery manager. This is used to keep using -// the manager that restarts SD's on reload for a few releases until we feel -// the new manager can be enabled for all users. -type discoveryManager interface { - ApplyConfig(cfg map[string]discovery.Configs) error - Run() error - SyncCh() <-chan map[string][]*targetgroup.Group -} - // rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum. 
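The options structs above deliberately keep the unit-carrying model.Duration values produced by flag parsing and convert to unit-agnostic integers only at the storage boundary, for example:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// durationToInt64Millis mirrors the helper used by ToAgentOptions above.
func durationToInt64Millis(d time.Duration) int64 {
	return int64(d / time.Millisecond)
}

func main() {
	// Flags such as retention parse into model.Duration ("15d", "2h", ...).
	retention, err := model.ParseDuration("15d")
	if err != nil {
		panic(err)
	}
	fmt.Println(durationToInt64Millis(time.Duration(retention))) // 1296000000
}
```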
type rwProtoMsgFlagParser struct { msgs *[]config.RemoteWriteProtoMsg @@ -1804,10 +1942,8 @@ func (p *rwProtoMsgFlagParser) Set(opt string) error { if err := t.Validate(); err != nil { return err } - for _, prev := range *p.msgs { - if prev == t { - return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs) - } + if slices.Contains(*p.msgs, t) { + return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs) } *p.msgs = append(*p.msgs, t) return nil diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index c827812e60..e4262f1b3b 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -20,28 +20,38 @@ import ( "fmt" "io" "math" + "net/http" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" + "sync" "syscall" "testing" "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/rules" + "github.com/prometheus/prometheus/util/testutil" ) +func init() { + // This can be removed when the legacy global mode is fully deprecated. + //nolint:staticcheck + model.NameValidationScheme = model.UTF8Validation +} + const startupTime = 10 * time.Second var ( @@ -120,6 +130,7 @@ func TestFailedStartupExitCode(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() fakeInputFile := "fake-input-file" expectedExitStatus := 2 @@ -206,83 +217,139 @@ func TestWALSegmentSizeBounds(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() - for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) + for _, tc := range []struct { + size string + exitCode int + }{ + { + size: "9MB", + exitCode: 1, + }, + { + size: "257MB", + exitCode: 1, + }, + { + size: "10", + exitCode: 2, + }, + { + size: "1GB", + exitCode: 1, + }, + { + size: "12MB", + exitCode: 0, + }, + } { + t.Run(tc.size, func(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) - // Log stderr in case of failure. - stderr, err := prom.StderrPipe() - require.NoError(t, err) - go func() { - slurp, _ := io.ReadAll(stderr) - t.Log(string(slurp)) - }() + // Log stderr in case of failure. + stderr, err := prom.StderrPipe() + require.NoError(t, err) - err = prom.Start() - require.NoError(t, err) + // WaitGroup is used to ensure that we don't call t.Log() after the test has finished. 
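The WaitGroup comment above marks a pattern these tests now use throughout: t.Log must not be called after a test returns, so the goroutine draining the child's stderr is fenced by a deferred Wait. A standalone sketch (the child command is an arbitrary stand-in):

```go
package main_test

import (
	"io"
	"os/exec"
	"sync"
	"testing"
)

func TestChildStderrLogging(t *testing.T) {
	cmd := exec.Command("go", "version") // arbitrary child process

	stderr, err := cmd.StderrPipe()
	if err != nil {
		t.Fatal(err)
	}

	// Deferred Wait guarantees the logging goroutine finishes before the
	// test function returns, so t.Log cannot fire on a finished test.
	var wg sync.WaitGroup
	wg.Add(1)
	defer wg.Wait()
	go func() {
		defer wg.Done()
		slurp, _ := io.ReadAll(stderr)
		t.Log(string(slurp))
	}()

	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}
	if err := cmd.Wait(); err != nil {
		t.Fatal(err)
	}
}
```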
+ var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() - if expectedExitStatus == 0 { - done := make(chan error, 1) - go func() { done <- prom.Wait() }() - select { - case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) - case <-time.After(startupTime): - prom.Process.Kill() - <-done + go func() { + defer wg.Done() + slurp, _ := io.ReadAll(stderr) + t.Log(string(slurp)) + }() + + err = prom.Start() + require.NoError(t, err) + + if tc.exitCode == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Fatalf("prometheus should be still running: %v", err) + case <-time.After(startupTime): + prom.Process.Kill() + <-done + } + return } - continue - } - err = prom.Wait() - require.Error(t, err) - var exitError *exec.ExitError - require.ErrorAs(t, err, &exitError) - status := exitError.Sys().(syscall.WaitStatus) - require.Equal(t, expectedExitStatus, status.ExitStatus()) + err = prom.Wait() + require.Error(t, err) + var exitError *exec.ExitError + require.ErrorAs(t, err, &exitError) + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitCode, status.ExitStatus()) + }) } } func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) { - t.Parallel() - if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() - for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) + for _, tc := range []struct { + size string + exitCode int + }{ + { + size: "512KB", + exitCode: 1, + }, + { + size: "1MB", + exitCode: 0, + }, + } { + t.Run(tc.size, func(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) - // Log stderr in case of failure. - stderr, err := prom.StderrPipe() - require.NoError(t, err) - go func() { - slurp, _ := io.ReadAll(stderr) - t.Log(string(slurp)) - }() + // Log stderr in case of failure. + stderr, err := prom.StderrPipe() + require.NoError(t, err) - err = prom.Start() - require.NoError(t, err) + // WaitGroup is used to ensure that we don't call t.Log() after the test has finished. 
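These table-driven cases assert on the child's exit status by unwrapping exec.ExitError; a reduced, Unix-only sketch of that unwrapping (the shell command is an arbitrary stand-in):

```go
package main_test

import (
	"errors"
	"os/exec"
	"syscall"
	"testing"
)

func TestChildExitStatus(t *testing.T) {
	// A child that deliberately exits with status 2.
	err := exec.Command("sh", "-c", "exit 2").Run()

	var exitErr *exec.ExitError
	if !errors.As(err, &exitErr) {
		t.Fatalf("expected *exec.ExitError, got %v", err)
	}
	// On Unix, Sys() is a syscall.WaitStatus carrying the exit code.
	status := exitErr.Sys().(syscall.WaitStatus)
	if status.ExitStatus() != 2 {
		t.Fatalf("expected exit status 2, got %d", status.ExitStatus())
	}
}
```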
+ var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() - if expectedExitStatus == 0 { - done := make(chan error, 1) - go func() { done <- prom.Wait() }() - select { - case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) - case <-time.After(startupTime): - prom.Process.Kill() - <-done + go func() { + defer wg.Done() + slurp, _ := io.ReadAll(stderr) + t.Log(string(slurp)) + }() + + err = prom.Start() + require.NoError(t, err) + + if tc.exitCode == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Fatalf("prometheus should be still running: %v", err) + case <-time.After(startupTime): + prom.Process.Kill() + <-done + } + return } - continue - } - err = prom.Wait() - require.Error(t, err) - var exitError *exec.ExitError - require.ErrorAs(t, err, &exitError) - status := exitError.Sys().(syscall.WaitStatus) - require.Equal(t, expectedExitStatus, status.ExitStatus()) + err = prom.Wait() + require.Error(t, err) + var exitError *exec.ExitError + require.ErrorAs(t, err, &exitError) + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitCode, status.ExitStatus()) + }) } } @@ -290,7 +357,7 @@ func TestTimeMetrics(t *testing.T) { tmpDir := t.TempDir() reg := prometheus.NewRegistry() - db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil) + db, err := openDBWithMetrics(tmpDir, promslog.NewNopLogger(), reg, nil, nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) @@ -348,7 +415,9 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) + t.Parallel() + + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -366,7 +435,9 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentFailedStartupWithServerFlag(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + t.Parallel() + + prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) output := bytes.Buffer{} prom.Stderr = &output @@ -393,7 +464,9 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) { } func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + t.Parallel() + + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -414,6 +487,7 @@ func TestModeSpecificFlags(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() testcases := []struct { mode string @@ -428,10 +502,11 @@ func TestModeSpecificFlags(t *testing.T) { for _, tc := range testcases { t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) { + t.Parallel() args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"} if tc.mode == "agent" { - args = append(args, "--enable-feature=agent", "--config.file="+agentConfig) + args = append(args, 
"--agent", "--config.file="+agentConfig) } else { args = append(args, "--config.file="+promConfig) } @@ -441,7 +516,14 @@ func TestModeSpecificFlags(t *testing.T) { // Log stderr in case of failure. stderr, err := prom.StderrPipe() require.NoError(t, err) + + // WaitGroup is used to ensure that we don't call t.Log() after the test has finished. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() slurp, _ := io.ReadAll(stderr) t.Log(string(slurp)) }() @@ -479,6 +561,8 @@ func TestDocumentation(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -503,6 +587,8 @@ func TestDocumentation(t *testing.T) { } func TestRwProtoMsgFlagParser(t *testing.T) { + t.Parallel() + defaultOpts := config.RemoteWriteProtoMsgs{ config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2, } @@ -563,3 +649,236 @@ func TestRwProtoMsgFlagParser(t *testing.T) { }) } } + +// reloadPrometheusConfig sends a reload request to the Prometheus server to apply +// updated configurations. +func reloadPrometheusConfig(t *testing.T, reloadURL string) { + t.Helper() + + r, err := http.Post(reloadURL, "text/plain", nil) + require.NoError(t, err, "Failed to reload Prometheus") + require.Equal(t, http.StatusOK, r.StatusCode, "Unexpected status code when reloading Prometheus") +} + +func getMetricValue(t *testing.T, body io.Reader, metricType model.MetricType, metricName string) (float64, error) { + t.Helper() + + p := expfmt.TextParser{} + metricFamilies, err := p.TextToMetricFamilies(body) + if err != nil { + return 0, err + } + metricFamily, ok := metricFamilies[metricName] + if !ok { + return 0, errors.New("metric family not found") + } + metric := metricFamily.GetMetric() + if len(metric) != 1 { + return 0, errors.New("metric not found") + } + switch metricType { + case model.MetricTypeGauge: + return metric[0].GetGauge().GetValue(), nil + case model.MetricTypeCounter: + return metric[0].GetCounter().GetValue(), nil + default: + t.Fatalf("metric type %s not supported", metricType) + } + + return 0, errors.New("cannot get value") +} + +func TestRuntimeGOGCConfig(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + t.Parallel() + + for _, tc := range []struct { + name string + config string + gogcEnvVar string + expectedGOGC float64 + }{ + { + name: "empty config file", + expectedGOGC: 75, + }, + { + name: "empty config file with GOGC env var set", + gogcEnvVar: "66", + expectedGOGC: 66, + }, + { + name: "gogc set through config", + config: ` +runtime: + gogc: 77`, + expectedGOGC: 77.0, + }, + { + name: "gogc set through config and env var", + config: ` +runtime: + gogc: 77`, + gogcEnvVar: "88", + expectedGOGC: 77.0, + }, + { + name: "incomplete runtime block", + config: ` +runtime:`, + expectedGOGC: 75.0, + }, + { + name: "incomplete runtime block and GOGC env var set", + config: ` +runtime:`, + gogcEnvVar: "88", + expectedGOGC: 88.0, + }, + { + name: "unrelated config and GOGC env var set", + config: ` +global: + scrape_interval: 500ms`, + gogcEnvVar: "80", + expectedGOGC: 80, + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + configFile := filepath.Join(tmpDir, "prometheus.yml") + + port := testutil.RandomUnprivilegedPort(t) + os.WriteFile(configFile, []byte(tc.config), 0o777) + prom := prometheusCommandWithLogging( + t, + configFile, + port, + fmt.Sprintf("--storage.tsdb.path=%s", tmpDir), + 
"--web.enable-lifecycle", + ) + // Inject GOGC when set. + prom.Env = os.Environ() + if tc.gogcEnvVar != "" { + prom.Env = append(prom.Env, fmt.Sprintf("GOGC=%s", tc.gogcEnvVar)) + } + require.NoError(t, prom.Start()) + + ensureGOGCValue := func(val float64) { + var ( + r *http.Response + err error + ) + // Wait for the /metrics endpoint to be ready. + require.Eventually(t, func() bool { + r, err = http.Get(fmt.Sprintf("http://127.0.0.1:%d/metrics", port)) + if err != nil { + return false + } + return r.StatusCode == http.StatusOK + }, 5*time.Second, 50*time.Millisecond) + defer r.Body.Close() + + // Check the final GOGC that's set, consider go_gc_gogc_percent from /metrics as source of truth. + gogc, err := getMetricValue(t, r.Body, model.MetricTypeGauge, "go_gc_gogc_percent") + require.NoError(t, err) + require.Equal(t, val, gogc) + } + + // The value is applied on startup. + ensureGOGCValue(tc.expectedGOGC) + + // After a reload with the same config, the value stays the same. + reloadURL := fmt.Sprintf("http://127.0.0.1:%d/-/reload", port) + reloadPrometheusConfig(t, reloadURL) + ensureGOGCValue(tc.expectedGOGC) + + // After a reload with different config, the value gets updated. + newConfig := ` +runtime: + gogc: 99` + os.WriteFile(configFile, []byte(newConfig), 0o777) + reloadPrometheusConfig(t, reloadURL) + ensureGOGCValue(99.0) + }) + } +} + +// TestHeadCompactionWhileScraping verifies that running a head compaction +// concurrently with a scrape does not trigger the data race described in +// https://github.com/prometheus/prometheus/issues/16490. +func TestHeadCompactionWhileScraping(t *testing.T) { + t.Parallel() + + // To increase the chance of reproducing the data race + for i := range 5 { + t.Run(strconv.Itoa(i), func(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + configFile := filepath.Join(tmpDir, "prometheus.yml") + + port := testutil.RandomUnprivilegedPort(t) + config := fmt.Sprintf(` +scrape_configs: + - job_name: 'self1' + scrape_interval: 61ms + static_configs: + - targets: ['localhost:%d'] + - job_name: 'self2' + scrape_interval: 67ms + static_configs: + - targets: ['localhost:%d'] +`, port, port) + os.WriteFile(configFile, []byte(config), 0o777) + + prom := prometheusCommandWithLogging( + t, + configFile, + port, + fmt.Sprintf("--storage.tsdb.path=%s", tmpDir), + "--storage.tsdb.min-block-duration=100ms", + ) + require.NoError(t, prom.Start()) + + require.Eventually(t, func() bool { + r, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/metrics", port)) + if err != nil { + return false + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return false + } + metrics, err := io.ReadAll(r.Body) + if err != nil { + return false + } + + // Wait for some compactions to run + compactions, err := getMetricValue(t, bytes.NewReader(metrics), model.MetricTypeCounter, "prometheus_tsdb_compactions_total") + if err != nil { + return false + } + if compactions < 3 { + return false + } + + // Sanity check: Some actual scraping was done. 
+ series, err := getMetricValue(t, bytes.NewReader(metrics), model.MetricTypeCounter, "prometheus_tsdb_head_series_created_total") + require.NoError(t, err) + require.NotZero(t, series) + + // No compaction must have failed + failures, err := getMetricValue(t, bytes.NewReader(metrics), model.MetricTypeCounter, "prometheus_tsdb_compactions_failed_total") + require.NoError(t, err) + require.Zero(t, failures) + return true + }, 15*time.Second, 500*time.Millisecond) + }) + } +} diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index 2011fb123f..94eec27e79 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -34,6 +34,7 @@ func TestStartupInterrupt(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t)) diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 62e317bf8b..7c073b59d0 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -88,20 +88,13 @@ func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) { _, err = p.configFile.Seek(0, 0) require.NoError(t, err) if queryLogFile != "" { - _, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile))) + _, err = fmt.Fprintf(p.configFile, "global:\n query_log_file: %s\n", queryLogFile) require.NoError(t, err) } _, err = p.configFile.Write([]byte(p.configuration())) require.NoError(t, err) } -// reloadConfig reloads the configuration using POST. -func (p *queryLogTest) reloadConfig(t *testing.T) { - r, err := http.Post(fmt.Sprintf("http://%s:%d%s/-/reload", p.host, p.port, p.prefix), "text/plain", nil) - require.NoError(t, err) - require.Equal(t, 200, r.StatusCode) -} - // query runs a query according to the test origin. func (p *queryLogTest) query(t *testing.T) { switch p.origin { @@ -125,12 +118,61 @@ func (p *queryLogTest) query(t *testing.T) { require.NoError(t, err) require.Equal(t, 200, r.StatusCode) case ruleOrigin: - time.Sleep(2 * time.Second) + // Poll the /api/v1/rules endpoint until a new rule evaluation is detected. + var lastEvalTime time.Time + for { + r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", p.host, p.port)) + require.NoError(t, err) + + rulesBody, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + + // Parse the rules response to find the last evaluation time. + newEvalTime := parseLastEvaluation(rulesBody) + if newEvalTime.After(lastEvalTime) { + if !lastEvalTime.IsZero() { + break + } + lastEvalTime = newEvalTime + } + + time.Sleep(100 * time.Millisecond) + } default: panic("can't query this origin") } } +// parseLastEvaluation extracts the last evaluation timestamp from the /api/v1/rules response. +func parseLastEvaluation(rulesBody []byte) time.Time { + var ruleResponse struct { + Status string `json:"status"` + Data struct { + Groups []struct { + Rules []struct { + LastEvaluation string `json:"lastEvaluation"` + } `json:"rules"` + } `json:"groups"` + } `json:"data"` + } + + err := json.Unmarshal(rulesBody, &ruleResponse) + if err != nil { + return time.Time{} + } + + for _, group := range ruleResponse.Data.Groups { + for _, rule := range group.Rules { + if evalTime, err := time.Parse(time.RFC3339Nano, rule.LastEvaluation); err == nil { + return evalTime + } + } + } + + return time.Time{} +} + // queryString returns the expected queryString of a this test. 
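parseLastEvaluation above decodes only the slice of the /api/v1/rules response it needs; the same decoding against a canned payload:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	body := []byte(`{"status":"success","data":{"groups":[{"rules":[{"lastEvaluation":"2024-01-01T00:00:05.123456Z"}]}]}}`)

	var resp struct {
		Data struct {
			Groups []struct {
				Rules []struct {
					LastEvaluation string `json:"lastEvaluation"`
				} `json:"rules"`
			} `json:"groups"`
		} `json:"data"`
	}
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	// A timestamp newer than the previously seen one signals a fresh evaluation.
	ts, err := time.Parse(time.RFC3339Nano, resp.Data.Groups[0].Rules[0].LastEvaluation)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.UTC())
}
```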
func (p *queryLogTest) queryString() string { switch p.origin { @@ -259,6 +301,7 @@ func (p *queryLogTest) run(t *testing.T) { }, p.params()...) prom := exec.Command(promPath, params...) + reloadURL := fmt.Sprintf("http://%s:%d%s/-/reload", p.host, p.port, p.prefix) // Log stderr in case of failure. stderr, err := prom.StderrPipe() @@ -286,7 +329,7 @@ func (p *queryLogTest) run(t *testing.T) { p.query(t) require.Empty(t, readQueryLog(t, queryLogFile.Name())) p.setQueryLog(t, queryLogFile.Name()) - p.reloadConfig(t) + reloadPrometheusConfig(t, reloadURL) } p.query(t) @@ -301,7 +344,7 @@ func (p *queryLogTest) run(t *testing.T) { p.validateLastQuery(t, ql) p.setQueryLog(t, "") - p.reloadConfig(t) + reloadPrometheusConfig(t, reloadURL) if !p.exactQueryCount() { qc = len(readQueryLog(t, queryLogFile.Name())) } @@ -313,7 +356,7 @@ func (p *queryLogTest) run(t *testing.T) { qc = len(ql) p.setQueryLog(t, queryLogFile.Name()) - p.reloadConfig(t) + reloadPrometheusConfig(t, reloadURL) p.query(t) qc++ @@ -322,7 +365,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) qc = len(ql) @@ -353,11 +396,11 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Len(t, ql, qc) } else { - require.Greater(t, len(ql), qc, "no queries logged") + require.GreaterOrEqual(t, len(ql), qc, "no queries logged") } p.validateLastQuery(t, ql) - p.reloadConfig(t) + reloadPrometheusConfig(t, reloadURL) p.query(t) @@ -393,6 +436,7 @@ func readQueryLog(t *testing.T, path string) []queryLogLine { file, err := os.Open(path) require.NoError(t, err) defer file.Close() + scanner := bufio.NewScanner(file) for scanner.Scan() { var q queryLogLine @@ -406,6 +450,7 @@ func TestQueryLog(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() cwd, err := os.Getwd() require.NoError(t, err) @@ -424,6 +469,7 @@ func TestQueryLog(t *testing.T) { } t.Run(p.String(), func(t *testing.T) { + t.Parallel() p.run(t) }) } diff --git a/cmd/prometheus/reload_test.go b/cmd/prometheus/reload_test.go new file mode 100644 index 0000000000..c59e51b316 --- /dev/null +++ b/cmd/prometheus/reload_test.go @@ -0,0 +1,235 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/util/testutil" +) + +const configReloadMetric = "prometheus_config_last_reload_successful" + +func TestAutoReloadConfig_ValidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +`, + expectedInterval: "15s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func TestAutoReloadConfig_ValidToInvalidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +invalid_syntax +`, + expectedInterval: "30s", + expectedMetric: 0, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func runTestSteps(t *testing.T, steps []struct { + configText string + expectedInterval string + expectedMetric float64 +}, +) { + configDir := t.TempDir() + configFilePath := filepath.Join(configDir, "prometheus.yml") + + t.Logf("Config file path: %s", configFilePath) + + require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file") + + port := testutil.RandomUnprivilegedPort(t) + prom := prometheusCommandWithLogging(t, configFilePath, port, "--enable-feature=auto-reload-config", "--config.auto-reload-interval=1s") + require.NoError(t, prom.Start()) + + baseURL := "http://localhost:" + strconv.Itoa(port) + require.Eventually(t, func() bool { + resp, err := http.Get(baseURL + "/-/ready") + if err != nil { + return false + } + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK + }, 5*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time") + + for i, step := range steps { + t.Logf("Step %d", i) + require.NoError(t, os.WriteFile(configFilePath, []byte(step.configText), 0o644), "Failed to write config file for step") + + require.Eventually(t, func() bool { + return verifyScrapeInterval(t, baseURL, step.expectedInterval) && + verifyConfigReloadMetric(t, baseURL, step.expectedMetric) + }, 10*time.Second, 500*time.Millisecond, "Prometheus config reload didn't happen in time") + } +} + +func verifyScrapeInterval(t *testing.T, baseURL, expectedInterval string) bool { + resp, err := http.Get(baseURL + "/api/v1/status/config") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + config := struct { + Data struct { + YAML string `json:"yaml"` + } `json:"data"` + }{} + + require.NoError(t, json.Unmarshal(body, &config)) + return strings.Contains(config.Data.YAML, "scrape_interval: "+expectedInterval) +} + +func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float64) bool { + resp, err := http.Get(baseURL + "/metrics") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + lines := 
string(body)
+	var actualValue float64
+	found := false
+
+	for _, line := range strings.Split(lines, "\n") {
+		if strings.HasPrefix(line, configReloadMetric) {
+			parts := strings.Fields(line)
+			if len(parts) >= 2 {
+				actualValue, err = strconv.ParseFloat(parts[1], 64)
+				require.NoError(t, err)
+				found = true
+				break
+			}
+		}
+	}
+
+	return found && actualValue == expectedValue
+}
+
+func captureLogsToTLog(t *testing.T, r io.Reader) {
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		t.Log(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		t.Logf("Error reading logs: %v", err)
+	}
+}
+
+func prometheusCommandWithLogging(t *testing.T, configFilePath string, port int, extraArgs ...string) *exec.Cmd {
+	stdoutPipe, stdoutWriter := io.Pipe()
+	stderrPipe, stderrWriter := io.Pipe()
+
+	var wg sync.WaitGroup
+	wg.Add(2)
+
+	args := []string{
+		"-test.main",
+		"--config.file=" + configFilePath,
+		"--web.listen-address=0.0.0.0:" + strconv.Itoa(port),
+	}
+	args = append(args, extraArgs...)
+	prom := exec.Command(promPath, args...)
+	prom.Stdout = stdoutWriter
+	prom.Stderr = stderrWriter
+
+	go func() {
+		defer wg.Done()
+		captureLogsToTLog(t, stdoutPipe)
+	}()
+	go func() {
+		defer wg.Done()
+		captureLogsToTLog(t, stderrPipe)
+	}()
+
+	t.Cleanup(func() {
+		prom.Process.Kill()
+		prom.Wait()
+		stdoutWriter.Close()
+		stderrWriter.Close()
+		wg.Wait()
+	})
+	return prom
+}
diff --git a/cmd/prometheus/scrape_failure_log_test.go b/cmd/prometheus/scrape_failure_log_test.go
new file mode 100644
index 0000000000..f35cb7bee6
--- /dev/null
+++ b/cmd/prometheus/scrape_failure_log_test.go
@@ -0,0 +1,186 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+func TestScrapeFailureLogFile(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	// Tracks the number of requests made to the mock server.
+	var requestCount atomic.Int32
+
+	// Starts a server that always returns HTTP 500 errors.
+	mockServerAddress := startGarbageServer(t, &requestCount)
+
+	// Create a temporary directory for Prometheus configuration and logs.
+	tempDir := t.TempDir()
+
+	// Define file paths for the scrape failure log and Prometheus configuration.
+	// Like other files, the scrape failure log file is resolved relative to the
+	// config file. Therefore, we keep the bare name written into the config
+	// separate from the full path used to check the file's contents.
+	scrapeFailureLogFileName := "scrape_failure.log"
+	scrapeFailureLogFile := filepath.Join(tempDir, scrapeFailureLogFileName)
+	promConfigFile := filepath.Join(tempDir, "prometheus.yml")
+
+	// Step 1: Set up an initial Prometheus configuration that globally
+	// specifies a scrape failure log file.
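+	// With the 500ms scrape interval configured below, two failed scrapes
+	// (and therefore two failure log entries) should appear within about a second.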
+ promConfig := fmt.Sprintf(` +global: + scrape_interval: 500ms + scrape_failure_log_file: %s + +scrape_configs: + - job_name: 'test_job' + static_configs: + - targets: ['%s'] +`, scrapeFailureLogFileName, mockServerAddress) + + err := os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to write Prometheus configuration file") + + // Start Prometheus with the generated configuration and a random port, enabling the lifecycle API. + port := testutil.RandomUnprivilegedPort(t) + params := []string{ + "-test.main", + "--config.file=" + promConfigFile, + "--storage.tsdb.path=" + filepath.Join(tempDir, "data"), + fmt.Sprintf("--web.listen-address=127.0.0.1:%d", port), + "--web.enable-lifecycle", + } + prometheusProcess := exec.Command(promPath, params...) + prometheusProcess.Stdout = os.Stdout + prometheusProcess.Stderr = os.Stderr + + err = prometheusProcess.Start() + require.NoError(t, err, "Failed to start Prometheus") + defer prometheusProcess.Process.Kill() + + // Wait until the mock server receives at least two requests from Prometheus. + require.Eventually(t, func() bool { + return requestCount.Load() >= 2 + }, 30*time.Second, 500*time.Millisecond, "Expected at least two requests to the mock server") + + // Verify that the scrape failures have been logged to the specified file. + content, err := os.ReadFile(scrapeFailureLogFile) + require.NoError(t, err, "Failed to read scrape failure log") + require.Contains(t, string(content), "server returned HTTP status 500 Internal Server Error", "Expected scrape failure log entry not found") + + // Step 2: Update the Prometheus configuration to remove the scrape failure + // log file setting. + promConfig = fmt.Sprintf(` +global: + scrape_interval: 1s + +scrape_configs: + - job_name: 'test_job' + static_configs: + - targets: ['%s'] +`, mockServerAddress) + + err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to update Prometheus configuration file") + + // Reload Prometheus with the updated configuration. + reloadURL := fmt.Sprintf("http://127.0.0.1:%d/-/reload", port) + reloadPrometheusConfig(t, reloadURL) + + // Count the number of lines in the scrape failure log file before any + // further requests. + preReloadLogLineCount := countLinesInFile(scrapeFailureLogFile) + + // Wait for at least two more requests to the mock server to ensure + // Prometheus continues scraping. + requestsBeforeReload := requestCount.Load() + require.Eventually(t, func() bool { + return requestCount.Load() >= requestsBeforeReload+2 + }, 30*time.Second, 500*time.Millisecond, "Expected two more requests to the mock server after configuration reload") + + // Ensure that no new lines were added to the scrape failure log file after + // the configuration change. + require.Equal(t, preReloadLogLineCount, countLinesInFile(scrapeFailureLogFile), "No new lines should be added to the scrape failure log file after removing the log setting") + + // Step 3: Re-add the scrape failure log file setting, but this time under + // scrape_configs, and reload Prometheus. + promConfig = fmt.Sprintf(` +global: + scrape_interval: 1s + +scrape_configs: + - job_name: 'test_job' + scrape_failure_log_file: %s + static_configs: + - targets: ['%s'] +`, scrapeFailureLogFileName, mockServerAddress) + + err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to update Prometheus configuration file") + + // Reload Prometheus with the updated configuration. 
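+	// (The /-/reload endpoint is only served because --web.enable-lifecycle was
+	// passed when starting Prometheus above.)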
+ reloadPrometheusConfig(t, reloadURL) + + // Wait for at least two more requests to the mock server and verify that + // new log entries are created. + postReloadLogLineCount := countLinesInFile(scrapeFailureLogFile) + requestsBeforeReAddingLog := requestCount.Load() + require.Eventually(t, func() bool { + return requestCount.Load() >= requestsBeforeReAddingLog+2 + }, 30*time.Second, 500*time.Millisecond, "Expected two additional requests after re-adding the log setting") + + // Confirm that new lines were added to the scrape failure log file. + require.Greater(t, countLinesInFile(scrapeFailureLogFile), postReloadLogLineCount, "New lines should be added to the scrape failure log file after re-adding the log setting") +} + +// startGarbageServer sets up a mock server that returns a 500 Internal Server Error +// for all requests. It also increments the request count each time it's hit. +func startGarbageServer(t *testing.T, requestCount *atomic.Int32) string { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + requestCount.Inc() + w.WriteHeader(http.StatusInternalServerError) + })) + t.Cleanup(server.Close) + + parsedURL, err := url.Parse(server.URL) + require.NoError(t, err, "Failed to parse mock server URL") + + return parsedURL.Host +} + +// countLinesInFile counts and returns the number of lines in the specified file. +func countLinesInFile(filePath string) int { + data, err := os.ReadFile(filePath) + if err != nil { + return 0 // Return 0 if the file doesn't exist or can't be read. + } + return bytes.Count(data, []byte{'\n'}) +} diff --git a/cmd/promtool/analyze.go b/cmd/promtool/analyze.go index c1f523de52..26e6f2188c 100644 --- a/cmd/promtool/analyze.go +++ b/cmd/promtool/analyze.go @@ -34,8 +34,8 @@ import ( ) var ( - errNotNativeHistogram = fmt.Errorf("not a native histogram") - errNotEnoughData = fmt.Errorf("not enough data") + errNotNativeHistogram = errors.New("not a native histogram") + errNotEnoughData = errors.New("not enough data") outputHeader = `Bucket stats for each histogram series over time ------------------------------------------------ @@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time) matrix, ok := values.(model.Matrix) if !ok { - return nil, fmt.Errorf("query of buckets resulted in non-Matrix") + return nil, errors.New("query of buckets resulted in non-Matrix") } return matrix, nil @@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int, prev := matrix[i].Values[timeIdx] // Assume the results are nicely aligned. 
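+		// (curr and prev must hold samples at identical timestamps; if they do
+		// not, we return the time-alignment error below.)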
if curr.Timestamp != prev.Timestamp { - return counts, fmt.Errorf("matrix result is not time aligned") + return counts, errors.New("matrix result is not time aligned") } counts[i+1] = int(curr.Value - prev.Value) } diff --git a/cmd/promtool/analyze_test.go b/cmd/promtool/analyze_test.go index 83d2ac4a3d..3de4283a15 100644 --- a/cmd/promtool/analyze_test.go +++ b/cmd/promtool/analyze_test.go @@ -17,9 +17,8 @@ import ( "fmt" "testing" - "github.com/stretchr/testify/require" - "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" ) var ( @@ -109,6 +108,7 @@ func init() { } func TestGetBucketCountsAtTime(t *testing.T) { + t.Parallel() cases := []struct { matrix model.Matrix length int @@ -137,6 +137,7 @@ func TestGetBucketCountsAtTime(t *testing.T) { for _, c := range cases { t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) { + t.Parallel() res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx) require.NoError(t, err) require.Equal(t, c.expected, res) @@ -145,6 +146,7 @@ func TestGetBucketCountsAtTime(t *testing.T) { } func TestCalcClassicBucketStatistics(t *testing.T) { + t.Parallel() cases := []struct { matrix model.Matrix expected *statistics @@ -162,6 +164,7 @@ func TestCalcClassicBucketStatistics(t *testing.T) { for i, c := range cases { t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { + t.Parallel() res, err := calcClassicBucketStatistics(c.matrix) require.NoError(t, err) require.Equal(t, c.expected, res) diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 400cae421a..47de3b5c1c 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -21,8 +21,8 @@ import ( "math" "time" - "github.com/go-kit/log" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" @@ -48,7 +48,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) { _, ts, _ := p.Series() if ts == nil { - return 0, 0, fmt.Errorf("expected timestamp for series got none") + return 0, 0, errors.New("expected timestamp for series got none") } if *ts > maxt { @@ -85,7 +85,7 @@ func getCompatibleBlockDuration(maxBlockDuration int64) int64 { return blockDuration } -func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) { +func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool, customLabels map[string]string) (returnErr error) { blockDuration := getCompatibleBlockDuration(maxBlockDuration) mint = blockDuration * (mint / blockDuration) @@ -102,6 +102,8 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn nextSampleTs int64 = math.MaxInt64 ) + lb := labels.NewBuilder(labels.EmptyLabels()) + for t := mint; t <= maxt; t += blockDuration { tsUpper := t + blockDuration if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper { @@ -118,7 +120,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn // also need to append samples throughout the whole block range. To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. 
- w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, 2*blockDuration) if err != nil { return fmt.Errorf("block writer: %w", err) } @@ -146,7 +148,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn _, ts, v := p.Series() if ts == nil { l := labels.Labels{} - p.Metric(&l) + p.Labels(&l) return fmt.Errorf("expected timestamp for series %v, got none", l) } if *ts < t { @@ -160,9 +162,15 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn } l := labels.Labels{} - p.Metric(&l) + p.Labels(&l) - if _, err := app.Append(0, l, *ts, v); err != nil { + lb.Reset(l) + for name, value := range customLabels { + lb.Set(name, value) + } + lbls := lb.Labels() + + if _, err := app.Append(0, lbls, *ts, v); err != nil { return fmt.Errorf("add sample: %w", err) } @@ -221,13 +229,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn return nil } -func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) { +func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) (err error) { p := textparse.NewOpenMetricsParser(input, nil) // Don't need a SymbolTable to get max and min timestamps. maxt, mint, err := getMinAndMaxTimestamps(p) if err != nil { return fmt.Errorf("getting min and max timestamp: %w", err) } - if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet); err != nil { + if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet, customLabels); err != nil { return fmt.Errorf("block creation: %w", err) } return nil diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index 32abfa46a8..8a599510a9 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -45,7 +45,7 @@ func sortSamples(samples []backfillSample) { }) } -func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { +func queryAllSeries(t testing.TB, q storage.Querier, _, _ int64) []backfillSample { ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) samples := []backfillSample{} for ss.Next() { @@ -86,12 +86,14 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp } func TestBackfill(t *testing.T) { + t.Parallel() tests := []struct { ToParse string IsOk bool Description string MaxSamplesInAppender int MaxBlockDuration time.Duration + Labels map[string]string Expected struct { MinTime int64 MaxTime int64 @@ -636,6 +638,49 @@ http_requests_total{code="400"} 1024 7199 }, }, }, + { + ToParse: `# HELP http_requests_total The total number of HTTP requests. 
+# TYPE http_requests_total counter +http_requests_total{code="200"} 1 1624463088.000 +http_requests_total{code="200"} 2 1629503088.000 +http_requests_total{code="200"} 3 1629863088.000 +# EOF +`, + IsOk: true, + Description: "Sample with external labels.", + MaxSamplesInAppender: 5000, + MaxBlockDuration: 2048 * time.Hour, + Labels: map[string]string{"cluster_id": "123", "org_id": "999"}, + Expected: struct { + MinTime int64 + MaxTime int64 + NumBlocks int + BlockDuration int64 + Samples []backfillSample + }{ + MinTime: 1624463088000, + MaxTime: 1629863088000, + NumBlocks: 2, + BlockDuration: int64(1458 * time.Hour / time.Millisecond), + Samples: []backfillSample{ + { + Timestamp: 1624463088000, + Value: 1, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + { + Timestamp: 1629503088000, + Value: 2, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + { + Timestamp: 1629863088000, + Value: 3, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + }, + }, + }, { ToParse: `# HELP rpc_duration_seconds A summary of the RPC duration in seconds. # TYPE rpc_duration_seconds summary @@ -685,11 +730,12 @@ after_eof 1 2 } for _, test := range tests { t.Run(test.Description, func(t *testing.T) { + t.Parallel() t.Logf("Test:%s", test.Description) outputDir := t.TempDir() - err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration) + err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration, test.Labels) if !test.IsOk { require.Error(t, err, test.Description) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index e1d275e97e..c6a5801d28 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -32,20 +32,18 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" "github.com/google/pprof/profile" "github.com/prometheus/client_golang/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil/promlint" - config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" - "github.com/prometheus/common/version" - "github.com/prometheus/exporter-toolkit/web" - "gopkg.in/yaml.v2" - dto "github.com/prometheus/client_model/go" promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" + "github.com/prometheus/common/version" + "github.com/prometheus/exporter-toolkit/web" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" @@ -58,24 +56,38 @@ import ( _ "github.com/prometheus/prometheus/plugins" // Register plugins. "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/promqltest" + "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/util/documentcli" ) +var promqlEnableDelayedNameRemoval = false + +func init() { + // This can be removed when the legacy global mode is fully deprecated. + //nolint:staticcheck + model.NameValidationScheme = model.UTF8Validation +} + const ( successExitCode = 0 failureExitCode = 1 // Exit code 3 is used for "one or more lint issues detected". 
lintErrExitCode = 3 - lintOptionAll = "all" - lintOptionDuplicateRules = "duplicate-rules" - lintOptionNone = "none" - checkHealth = "/-/healthy" - checkReadiness = "/-/ready" + lintOptionAll = "all" + lintOptionDuplicateRules = "duplicate-rules" + lintOptionTooLongScrapeInterval = "too-long-scrape-interval" + lintOptionNone = "none" + checkHealth = "/-/healthy" + checkReadiness = "/-/ready" ) -var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone} +var ( + lintRulesOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone} + // Same as lintRulesOptions, but including scrape config linting options as well. + lintConfigOptions = append(append([]string{}, lintRulesOptions...), lintOptionTooLongScrapeInterval) +) func main() { var ( @@ -92,6 +104,10 @@ func main() { app.HelpFlag.Short('h') checkCmd := app.Command("check", "Check the resources for validity.") + checkLookbackDelta := checkCmd.Flag( + "query.lookback-delta", + "The server's maximum query lookback duration.", + ).Default("5m").Duration() experimental := app.Flag("experimental", "Enable experimental commands.").Bool() @@ -108,11 +124,12 @@ func main() { checkConfigSyntaxOnly := checkConfigCmd.Flag("syntax-only", "Only check the config file syntax, ignoring file and content validation referenced in the config").Bool() checkConfigLint := checkConfigCmd.Flag( "lint", - "Linting checks to apply to the rules specified in the config. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting", + "Linting checks to apply to the rules/scrape configs specified in the config. Available options are: "+strings.Join(lintConfigOptions, ", ")+". Use --lint=none to disable linting", ).Default(lintOptionDuplicateRules).String() checkConfigLintFatal := checkConfigCmd.Flag( "lint-fatal", "Make lint errors exit with exit code 3.").Default("false").Bool() + checkConfigIgnoreUnknownFields := checkConfigCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule groups read by the config files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool() checkWebConfigCmd := checkCmd.Command("web-config", "Check if the web config files are valid or not.") webConfigFiles := checkWebConfigCmd.Arg( @@ -135,11 +152,12 @@ func main() { ).ExistingFiles() checkRulesLint := checkRulesCmd.Flag( "lint", - "Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting", + "Linting checks to apply. Available options are: "+strings.Join(lintRulesOptions, ", ")+". Use --lint=none to disable linting", ).Default(lintOptionDuplicateRules).String() checkRulesLintFatal := checkRulesCmd.Flag( "lint-fatal", "Make lint errors exit with exit code 3.").Default("false").Bool() + checkRulesIgnoreUnknownFields := checkRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. 
Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool() checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage) checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool() @@ -204,13 +222,16 @@ func main() { pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() testCmd := app.Command("test", "Unit testing.") + junitOutFile := testCmd.Flag("junit", "File path to store JUnit XML test results.").OpenFile(os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) testRulesCmd := testCmd.Command("rules", "Unit tests for rules.") testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings() testRulesFiles := testRulesCmd.Arg( "test-rule-file", "The unit test file.", ).Required().ExistingFiles() + testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool() testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool() + testRulesIgnoreUnknownFields := testRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the test files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool() defaultDBPath := "data/" tsdbCmd := app.Command("tsdb", "Run tsdb commands.") @@ -235,16 +256,16 @@ func main() { tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.") dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() - dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() - dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() - dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() + dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String() + dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() + dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. 
Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.") dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() - dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() - dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() - dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() + dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String() + dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() + dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.") @@ -252,6 +273,7 @@ func main() { importQuiet := importCmd.Flag("quiet", "Do not print created blocks.").Short('q').Bool() maxBlockDuration := importCmd.Flag("max-block-duration", "Maximum duration created blocks may span. Anything less than 2h is ignored.").Hidden().PlaceHolder("").Duration() openMetricsImportCmd := importCmd.Command("openmetrics", "Import samples from OpenMetrics input and produce TSDB blocks. Please refer to the storage docs for more details.") + openMetricsLabels := openMetricsImportCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value").StringMap() importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String() importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String() importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.") @@ -284,7 +306,7 @@ func main() { promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String() promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String() - featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). 
See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings() + featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details").Default("").Strings() documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden() @@ -303,40 +325,39 @@ func main() { kingpin.Fatalf("Cannot set base auth in the server URL and use a http.config.file at the same time") } var err error - httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath) + httpConfig, _, err := promconfig.LoadHTTPConfigFile(httpConfigFilePath) if err != nil { kingpin.Fatalf("Failed to load HTTP config file: %v", err) } - httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", config_util.WithUserAgent("promtool/"+version.Version)) + httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", promconfig.WithUserAgent(version.ComponentUserAgent("promtool"))) if err != nil { kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err) } } - var noDefaultScrapePort bool for _, f := range *featureList { opts := strings.Split(f, ",") for _, o := range opts { switch o { - case "no-default-scrape-port": - noDefaultScrapePort = true + case "promql-experimental-functions": + parser.EnableExperimentalFunctions = true + case "promql-delayed-name-removal": + promqlEnableDelayedNameRemoval = true case "": continue - case "promql-at-modifier", "promql-negative-offset": - fmt.Printf(" WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o) default: - fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o) + fmt.Printf(" WARNING: Unknown feature passed to --enable-feature: %s", o) } } } switch parsedCmd { case sdCheckCmd.FullCommand(): - os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer)) + os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer)) case checkConfigCmd.FullCommand(): - os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) + os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.Duration(*checkLookbackDelta)), *configFiles...)) case checkServerHealthCmd.FullCommand(): os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper))) @@ -348,7 +369,7 @@ func main() { os.Exit(CheckWebConfig(*webConfigFiles...)) case checkRulesCmd.FullCommand(): - os.Exit(CheckRules(newLintConfig(*checkRulesLint, *checkRulesLintFatal), *ruleFiles...)) + os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields), *ruleFiles...)) case checkMetricsCmd.FullCommand(): os.Exit(CheckMetrics(*checkMetricsExtended)) @@ -378,13 +399,20 @@ func main() { os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p)) case testRulesCmd.FullCommand(): - os.Exit(RulesUnitTest( + results := io.Discard + if *junitOutFile != nil { + results = *junitOutFile + } + os.Exit(RulesUnitTestResult(results, promqltest.LazyLoaderOpts{ - EnableAtModifier: true, - EnableNegativeOffset: true, + 
EnableAtModifier: true, + EnableNegativeOffset: true, + EnableDelayedNameRemoval: promqlEnableDelayedNameRemoval, }, *testRulesRun, *testRulesDiff, + *testRulesDebug, + *testRulesIgnoreUnknownFields, *testRulesFiles...), ) @@ -403,7 +431,7 @@ func main() { os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics))) // TODO(aSquare14): Work on adding support for custom block size. case openMetricsImportCmd.FullCommand(): - os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) + os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels)) case importRulesCmd.FullCommand(): os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) @@ -435,18 +463,20 @@ func checkExperimental(f bool) { } } -var errLint = fmt.Errorf("lint error") +var errLint = errors.New("lint error") -type lintConfig struct { - all bool - duplicateRules bool - fatal bool +type rulesLintConfig struct { + all bool + duplicateRules bool + fatal bool + ignoreUnknownFields bool } -func newLintConfig(stringVal string, fatal bool) lintConfig { +func newRulesLintConfig(stringVal string, fatal, ignoreUnknownFields bool) rulesLintConfig { items := strings.Split(stringVal, ",") - ls := lintConfig{ - fatal: fatal, + ls := rulesLintConfig{ + fatal: fatal, + ignoreUnknownFields: ignoreUnknownFields, } for _, setting := range items { switch setting { @@ -456,17 +486,58 @@ func newLintConfig(stringVal string, fatal bool) lintConfig { ls.duplicateRules = true case lintOptionNone: default: - fmt.Printf("WARNING: unknown lint option %s\n", setting) + fmt.Printf("WARNING: unknown lint option: %q\n", setting) } } return ls } -func (ls lintConfig) lintDuplicateRules() bool { +func (ls rulesLintConfig) lintDuplicateRules() bool { return ls.all || ls.duplicateRules } -// Check server status - healthy & ready. +type configLintConfig struct { + rulesLintConfig + + lookbackDelta model.Duration +} + +func newConfigLintConfig(optionsStr string, fatal, ignoreUnknownFields bool, lookbackDelta model.Duration) configLintConfig { + c := configLintConfig{ + rulesLintConfig: rulesLintConfig{ + fatal: fatal, + }, + } + + lintNone := false + var rulesOptions []string + for _, option := range strings.Split(optionsStr, ",") { + switch option { + case lintOptionAll, lintOptionTooLongScrapeInterval: + c.lookbackDelta = lookbackDelta + if option == lintOptionAll { + rulesOptions = append(rulesOptions, lintOptionAll) + } + case lintOptionNone: + lintNone = true + default: + rulesOptions = append(rulesOptions, option) + } + } + + if lintNone { + c.lookbackDelta = 0 + rulesOptions = nil + } + + if len(rulesOptions) > 0 { + c.rulesLintConfig = newRulesLintConfig(strings.Join(rulesOptions, ","), fatal, ignoreUnknownFields) + } + + return c +} + +// CheckServerStatus - healthy & ready. func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error { if serverURL.Scheme == "" { serverURL.Scheme = "http" @@ -504,12 +575,12 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht } // CheckConfig validates configuration files. 
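+// In addition to syntax validation, it lints the scrape configs and the rule
+// files referenced by each configuration file (via lintScrapeConfigs and
+// checkRules); that linting is skipped when only checking syntax.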
-func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int { +func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, files ...string) int { failed := false hasErrors := false for _, f := range files { - ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly) + ruleFiles, scrapeConfigs, err := checkConfig(agentMode, f, checkSyntaxOnly) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) hasErrors = true @@ -522,12 +593,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files } fmt.Println() - rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings) - if rulesFailed { - failed = rulesFailed - } - if rulesHasErrors { - hasErrors = rulesHasErrors + if !checkSyntaxOnly { + scrapeConfigsFailed := lintScrapeConfigs(scrapeConfigs, lintSettings) + failed = failed || scrapeConfigsFailed + rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig) + failed = failed || rulesFailed + hasErrors = hasErrors || rulesHaveErrors } } if failed && hasErrors { @@ -566,12 +637,12 @@ func checkFileExists(fn string) error { return err } -func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) { +func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, []*config.ScrapeConfig, error) { fmt.Println("Checking", filename) - cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger()) + cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger()) if err != nil { - return nil, err + return nil, nil, err } var ruleFiles []string @@ -579,15 +650,15 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin for _, rf := range cfg.RuleFiles { rfs, err := filepath.Glob(rf) if err != nil { - return nil, err + return nil, nil, err } // If an explicit file was given, error if it is not accessible. if !strings.Contains(rf, "*") { if len(rfs) == 0 { - return nil, fmt.Errorf("%q does not point to an existing file", rf) + return nil, nil, fmt.Errorf("%q does not point to an existing file", rf) } if err := checkFileExists(rfs[0]); err != nil { - return nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err) + return nil, nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err) } } ruleFiles = append(ruleFiles, rfs...) 
@@ -601,26 +672,26 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin var err error scfgs, err = cfg.GetScrapeConfigs() if err != nil { - return nil, fmt.Errorf("error loading scrape configs: %w", err) + return nil, nil, fmt.Errorf("error loading scrape configs: %w", err) } } for _, scfg := range scfgs { if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil { if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil { - return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err) + return nil, nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err) } } if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil { - return nil, err + return nil, nil, err } for _, c := range scfg.ServiceDiscoveryConfigs { switch c := c.(type) { case *kubernetes.SDConfig: if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil { - return nil, err + return nil, nil, err } case *file.SDConfig: if checkSyntaxOnly { @@ -629,17 +700,17 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin for _, file := range c.Files { files, err := filepath.Glob(file) if err != nil { - return nil, err + return nil, nil, err } if len(files) != 0 { for _, f := range files { var targetGroups []*targetgroup.Group targetGroups, err = checkSDFile(f) if err != nil { - return nil, fmt.Errorf("checking SD file %q: %w", file, err) + return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err) } if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil { - return nil, err + return nil, nil, err } } continue @@ -648,7 +719,7 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin } case discovery.StaticConfig: if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil { - return nil, err + return nil, nil, err } } } @@ -665,18 +736,18 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin for _, file := range c.Files { files, err := filepath.Glob(file) if err != nil { - return nil, err + return nil, nil, err } if len(files) != 0 { for _, f := range files { var targetGroups []*targetgroup.Group targetGroups, err = checkSDFile(f) if err != nil { - return nil, fmt.Errorf("checking SD file %q: %w", file, err) + return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err) } if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil { - return nil, err + return nil, nil, err } } continue @@ -685,15 +756,15 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin } case discovery.StaticConfig: if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil { - return nil, err + return nil, nil, err } } } } - return ruleFiles, nil + return ruleFiles, scfgs, nil } -func checkTLSConfig(tlsConfig config_util.TLSConfig, checkSyntaxOnly bool) error { +func checkTLSConfig(tlsConfig promconfig.TLSConfig, checkSyntaxOnly bool) error { if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 { return fmt.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile) } @@ -752,7 +823,7 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) { } // CheckRules validates rule files. 
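+// When no files are given, rules are read from standard input instead.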
-func CheckRules(ls lintConfig, files ...string) int { +func CheckRules(ls rulesLintConfig, files ...string) int { failed := false hasErrors := false if len(files) == 0 { @@ -772,7 +843,7 @@ func CheckRules(ls lintConfig, files ...string) int { } // checkRulesFromStdin validates rule from stdin. -func checkRulesFromStdin(ls lintConfig) (bool, bool) { +func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) { failed := false hasErrors := false fmt.Println("Checking standard input") @@ -781,7 +852,7 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) { fmt.Fprintln(os.Stderr, " FAILED:", err) return true, true } - rgs, errs := rulefmt.Parse(data) + rgs, errs := rulefmt.Parse(data, ls.ignoreUnknownFields) if errs != nil { failed = true fmt.Fprintln(os.Stderr, " FAILED:") @@ -810,12 +881,12 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) { } // checkRules validates rule files. -func checkRules(files []string, ls lintConfig) (bool, bool) { +func checkRules(files []string, ls rulesLintConfig) (bool, bool) { failed := false hasErrors := false for _, f := range files { fmt.Println("Checking", f) - rgs, errs := rulefmt.ParseFile(f) + rgs, errs := rulefmt.ParseFile(f, ls.ignoreUnknownFields) if errs != nil { failed = true fmt.Fprintln(os.Stderr, " FAILED:") @@ -844,7 +915,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) { return failed, hasErrors } -func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) { +func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings rulesLintConfig) (int, []error) { numRules := 0 for _, rg := range rgs.Groups { numRules += len(rg.Rules) @@ -868,6 +939,16 @@ func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []e return numRules, nil } +func lintScrapeConfigs(scrapeConfigs []*config.ScrapeConfig, lintSettings configLintConfig) bool { + for _, scfg := range scrapeConfigs { + if lintSettings.lookbackDelta > 0 && scfg.ScrapeInterval >= lintSettings.lookbackDelta { + fmt.Fprintf(os.Stderr, " FAILED: too long scrape interval found, data point will be marked as stale - job: %s, interval: %s\n", scfg.JobName, scfg.ScrapeInterval) + return true + } + } + return false +} + type compareRuleType struct { metric string label labels.Labels @@ -889,40 +970,40 @@ func compare(a, b compareRuleType) int { func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType { var duplicates []compareRuleType - var rules compareRuleTypes + var cRules compareRuleTypes for _, group := range groups { for _, rule := range group.Rules { - rules = append(rules, compareRuleType{ + cRules = append(cRules, compareRuleType{ metric: ruleMetric(rule), - label: labels.FromMap(rule.Labels), + label: rules.FromMaps(group.Labels, rule.Labels), }) } } - if len(rules) < 2 { + if len(cRules) < 2 { return duplicates } - sort.Sort(rules) + sort.Sort(cRules) - last := rules[0] - for i := 1; i < len(rules); i++ { - if compare(last, rules[i]) == 0 { + last := cRules[0] + for i := 1; i < len(cRules); i++ { + if compare(last, cRules[i]) == 0 { // Don't add a duplicated rule multiple times. 
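+			// (A rule that appears three times is still reported only once.)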
if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 { - duplicates = append(duplicates, rules[i]) + duplicates = append(duplicates, cRules[i]) } } - last = rules[i] + last = cRules[i] } return duplicates } -func ruleMetric(rule rulefmt.RuleNode) string { - if rule.Alert.Value != "" { - return rule.Alert.Value +func ruleMetric(rule rulefmt.Rule) string { + if rule.Alert != "" { + return rule.Alert } - return rule.Record.Value + return rule.Record } var checkMetricsUsage = strings.TrimSpace(` @@ -1176,7 +1257,7 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu return fmt.Errorf("new api client error: %w", err) } - ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api) + ruleImporter := newRuleImporter(promslog.New(&promslog.Config{}), cfg, api) errs := ruleImporter.loadGroups(ctx, files) for _, err := range errs { if err != nil { @@ -1210,7 +1291,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c lb := labels.NewBuilder(labels.EmptyLabels()) for _, tg := range targetGroups { var failures []error - targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb) + targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb) if len(failures) > 0 { first := failures[0] return first @@ -1250,7 +1331,7 @@ func labelsSetPromQL(query, labelMatchType, name, value string) error { return fmt.Errorf("invalid label match type: %s", labelMatchType) } - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { var found bool for i, l := range n.LabelMatchers { @@ -1281,7 +1362,7 @@ func labelsDeletePromQL(query, name string) error { return err } - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { for i, l := range n.LabelMatchers { if l.Name == name { diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 78500fe937..f922d18c4e 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -31,12 +31,20 @@ import ( "testing" "time" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/promql/promqltest" ) +func init() { + // This can be removed when the legacy global mode is fully deprecated. 
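+	// This mirrors the identical init in promtool's main.go, so the tests
+	// validate metric and label names the same way the binary does.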
+ //nolint:staticcheck + model.NameValidationScheme = model.UTF8Validation +} + var promtoolPath = os.Args[0] func TestMain(m *testing.M) { @@ -53,6 +61,7 @@ func TestMain(m *testing.M) { } func TestQueryRange(t *testing.T) { + t.Parallel() s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`) defer s.Close() @@ -76,6 +85,7 @@ func TestQueryRange(t *testing.T) { } func TestQueryInstant(t *testing.T) { + t.Parallel() s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "vector", "result": []}}`) defer s.Close() @@ -107,6 +117,7 @@ func mockServer(code int, body string) (*httptest.Server, func() *http.Request) } func TestCheckSDFile(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -126,8 +137,8 @@ func TestCheckSDFile(t *testing.T) { }, { name: "bad file extension", - file: "./testdata/bad-sd-file-extension.nonexistant", - err: "invalid file extension: \".nonexistant\"", + file: "./testdata/bad-sd-file-extension.nonexistent", + err: "invalid file extension: \".nonexistent\"", }, { name: "bad format", @@ -137,9 +148,10 @@ func TestCheckSDFile(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { + t.Parallel() _, err := checkSDFile(test.file) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -148,6 +160,7 @@ func TestCheckSDFile(t *testing.T) { } func TestCheckDuplicates(t *testing.T) { + t.Parallel() cases := []struct { name string ruleFile string @@ -172,7 +185,8 @@ func TestCheckDuplicates(t *testing.T) { for _, test := range cases { c := test t.Run(c.name, func(t *testing.T) { - rgs, err := rulefmt.ParseFile(c.ruleFile) + t.Parallel() + rgs, err := rulefmt.ParseFile(c.ruleFile, false) require.Empty(t, err) dups := checkDuplicates(rgs.Groups) require.Equal(t, c.expectedDups, dups) @@ -181,7 +195,7 @@ func TestCheckDuplicates(t *testing.T) { } func BenchmarkCheckDuplicates(b *testing.B) { - rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml") + rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml", false) require.Empty(b, err) b.ResetTimer() @@ -191,6 +205,7 @@ func BenchmarkCheckDuplicates(b *testing.B) { } func TestCheckTargetConfig(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -219,9 +234,10 @@ func TestCheckTargetConfig(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { - _, err := checkConfig(false, "testdata/"+test.file, false) + t.Parallel() + _, _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -230,6 +246,7 @@ func TestCheckTargetConfig(t *testing.T) { } func TestCheckConfigSyntax(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -302,13 +319,14 @@ func TestCheckConfigSyntax(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { - _, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly) + t.Parallel() + _, _, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly) expectedErrMsg := test.err if strings.Contains(runtime.GOOS, "windows") { expectedErrMsg = 
test.errWindows } if expectedErrMsg != "" { - require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -317,6 +335,7 @@ func TestCheckConfigSyntax(t *testing.T) { } func TestAuthorizationConfig(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -336,9 +355,10 @@ func TestAuthorizationConfig(t *testing.T) { for _, test := range cases { t.Run(test.name, func(t *testing.T) { - _, err := checkConfig(false, "testdata/"+test.file, false) + t.Parallel() + _, _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { - require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error()) + require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error()) return } require.NoError(t, err) @@ -350,6 +370,7 @@ func TestCheckMetricsExtended(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping on windows") } + t.Parallel() f, err := os.Open("testdata/metrics-test.prom") require.NoError(t, err) @@ -386,6 +407,7 @@ func TestExitCodes(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() for _, c := range []struct { file string @@ -410,8 +432,10 @@ func TestExitCodes(t *testing.T) { }, } { t.Run(c.file, func(t *testing.T) { + t.Parallel() for _, lintFatal := range []bool{true, false} { t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) { + t.Parallel() args := []string{"-test.main", "check", "config", "testdata/" + c.file} if lintFatal { args = append(args, "--lint-fatal") @@ -442,6 +466,7 @@ func TestDocumentation(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } + t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -484,8 +509,8 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false)) - require.Equal(t, successExitCode, exitCode, "") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false)) + require.Equal(t, successExitCode, exitCode) }) t.Run("rules-bad", func(t *testing.T) { @@ -506,8 +531,8 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false)) - require.Equal(t, failureExitCode, exitCode, "") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false)) + require.Equal(t, failureExitCode, exitCode) }) t.Run("rules-lint-fatal", func(t *testing.T) { @@ -528,24 +553,125 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true)) - require.Equal(t, lintErrExitCode, exitCode, "") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false)) + require.Equal(t, lintErrExitCode, exitCode) }) } +func TestCheckRulesWithFeatureFlag(t *testing.T) { + // As opposed to TestCheckRules calling CheckRules directly we run promtool + // so the feature flag parsing can be tested. + + args := []string{"-test.main", "--enable-feature=promql-experimental-functions", "check", "rules", "testdata/features.yml"} + tool := exec.Command(promtoolPath, args...) 
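+	// Run returns a non-nil error (an *exec.ExitError) when promtool exits with
+	// a non-zero code, so a nil error means the flag was accepted and the rules passed.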
+ err := tool.Run() + require.NoError(t, err) +} + func TestCheckRulesWithRuleFiles(t *testing.T) { t.Run("rules-good", func(t *testing.T) { - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml") - require.Equal(t, successExitCode, exitCode, "") + t.Parallel() + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml") + require.Equal(t, successExitCode, exitCode) }) t.Run("rules-bad", func(t *testing.T) { - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml") - require.Equal(t, failureExitCode, exitCode, "") + t.Parallel() + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml") + require.Equal(t, failureExitCode, exitCode) }) t.Run("rules-lint-fatal", func(t *testing.T) { - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml") - require.Equal(t, lintErrExitCode, exitCode, "") + t.Parallel() + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml") + require.Equal(t, lintErrExitCode, exitCode) }) } + +func TestCheckScrapeConfigs(t *testing.T) { + for _, tc := range []struct { + name string + lookbackDelta model.Duration + expectError bool + }{ + { + name: "scrape interval less than lookback delta", + lookbackDelta: model.Duration(11 * time.Minute), + expectError: false, + }, + { + name: "scrape interval greater than lookback delta", + lookbackDelta: model.Duration(5 * time.Minute), + expectError: true, + }, + { + name: "scrape interval same as lookback delta", + lookbackDelta: model.Duration(10 * time.Minute), + expectError: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + // Non-fatal linting. + code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + require.Equal(t, successExitCode, code, "Non-fatal linting should return success") + // Fatal linting. + code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + if tc.expectError { + require.Equal(t, lintErrExitCode, code, "Fatal linting should return error") + } else { + require.Equal(t, successExitCode, code, "Fatal linting should return success when there are no problems") + } + // Check syntax only, no linting. + code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + require.Equal(t, successExitCode, code, "Fatal linting should return success when checking syntax only") + // Lint option "none" should disable linting. 
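+			// ("none" wins even when combined with other options: newConfigLintConfig
+			// clears all lint settings once it sees it.)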
+ code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+ require.Equal(t, successExitCode, code, `Fatal linting should return success when lint option "none" is specified`)
+ })
+ }
+}
+
+func TestTSDBDumpCommand(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ t.Parallel()
+
+ storage := promqltest.LoadedStorage(t, `
+ load 1m
+ metric{foo="bar"} 1 2 3
+ `)
+ t.Cleanup(func() { storage.Close() })
+
+ for _, c := range []struct {
+ name string
+ subCmd string
+ sandboxDirRoot string
+ }{
+ {
+ name: "dump",
+ subCmd: "dump",
+ },
+ {
+ name: "dump with sandbox dir root",
+ subCmd: "dump",
+ sandboxDirRoot: t.TempDir(),
+ },
+ {
+ name: "dump-openmetrics",
+ subCmd: "dump-openmetrics",
+ },
+ {
+ name: "dump-openmetrics with sandbox dir root",
+ subCmd: "dump-openmetrics",
+ sandboxDirRoot: t.TempDir(),
+ },
+ } {
+ t.Run(c.name, func(t *testing.T) {
+ t.Parallel()
+ args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()}
+ cmd := exec.Command(promtoolPath, args...)
+ require.NoError(t, cmd.Run())
+ })
+ }
+}
diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go
index 46246b672a..56b5209541 100644
--- a/cmd/promtool/metrics.go
+++ b/cmd/promtool/metrics.go
@@ -23,15 +23,15 @@ import (
"os"
"time"
- "github.com/golang/snappy"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/storage/remote"
+ "github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/fmtutil"
)
-// Push metrics to a prometheus remote write (for testing purpose only).
+// PushMetrics pushes metrics to a Prometheus remote-write endpoint (for testing purposes only).
func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
addressURL, err := url.Parse(url.String())
if err != nil {
@@ -101,6 +101,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin
return successExitCode
}
+// TODO(bwplotka): Add PRW 2.0 support.
func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
if err != nil {
@@ -115,8 +116,13 @@ func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]s
}
// Encode the request body into snappy encoding.
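// Note that compression.Encode, unlike snappy.Encode, can return an
// error, hence the extra failure path added below.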
- compressed := snappy.Encode(nil, raw) - err = client.Store(context.Background(), compressed, 0) + compressed, err := compression.Encode(compression.Snappy, raw, nil) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + _, err = client.Store(context.Background(), compressed, 0) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) return false diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 5a18644842..b2eb18ca8e 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -16,12 +16,12 @@ package main import ( "context" "fmt" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -38,7 +38,7 @@ type queryRangeAPI interface { } type ruleImporter struct { - logger log.Logger + logger *slog.Logger config ruleImporterConfig apiClient queryRangeAPI @@ -57,8 +57,8 @@ type ruleImporterConfig struct { // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series // written to disk in blocks. -func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter { - level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822)) +func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter { + logger.Info("new rule importer", "component", "backfiller", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822)) return &ruleImporter{ logger: logger, config: config, @@ -69,7 +69,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que // loadGroups parses groups from a list of recording rule files. func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) { - groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...) + groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, false, filenames...) if errs != nil { return errs } @@ -80,10 +80,10 @@ func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) // importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks. 
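// Groups live in a map, so they are processed in nondeterministic order;
// per-rule errors are collected and returned instead of aborting the import.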
func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { for name, group := range importer.groups { - level.Info(importer.logger).Log("backfiller", "processing group", "name", name) + importer.logger.Info("processing group", "component", "backfiller", "name", name) for i, r := range group.Rules() { - level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name()) + importer.logger.Info("processing rule", "component", "backfiller", "id", i, "name", r.Name()) if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil { errs = append(errs, err) } @@ -124,7 +124,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName return fmt.Errorf("query range: %w", err) } if warnings != nil { - level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings) + importer.logger.Warn("Range query returned warnings.", "warnings", warnings) } // To prevent races with compaction, a block writer only allows appending samples @@ -133,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName // also need to append samples throughout the whole block range. To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. - w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), importer.config.outputDir, 2*blockDuration) if err != nil { return fmt.Errorf("new block writer: %w", err) } diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index d55fb0c896..3cb47aa8af 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -21,9 +21,9 @@ import ( "testing" "time" - "github.com/go-kit/log" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" @@ -35,7 +35,7 @@ type mockQueryRangeAPI struct { samples model.Matrix } -func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { +func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, _ string, _ v1.Range, _ ...v1.Option) (model.Value, v1.Warnings, error) { return mockAPI.samples, v1.Warnings{}, nil } @@ -43,6 +43,7 @@ const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Mil // TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together. 
func TestBackfillRuleIntegration(t *testing.T) { + t.Parallel() const ( testMaxSampleCount = 50 testValue = 123 @@ -72,6 +73,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { + t.Parallel() tmpDir := t.TempDir() ctx := context.Background() @@ -161,7 +163,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() cfg := ruleImporterConfig{ outputDir: tmpDir, start: start.Add(-10 * time.Hour), @@ -210,6 +212,7 @@ func createMultiRuleTestFiles(path string) error { // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics // received from Prometheus Query API, including the __name__ label. func TestBackfillLabels(t *testing.T) { + t.Parallel() tmpDir := t.TempDir() ctx := context.Background() @@ -251,6 +254,7 @@ func TestBackfillLabels(t *testing.T) { require.NoError(t, err) t.Run("correct-labels", func(t *testing.T) { + t.Parallel() selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) for selectedSeries.Next() { series := selectedSeries.At() diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index e65262d439..884864205c 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -20,9 +20,9 @@ import ( "os" "time" - "github.com/go-kit/log" "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" @@ -38,10 +38,10 @@ type sdCheckResult struct { } // CheckSD performs service discovery for the given job name and reports the results. -func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, _ prometheus.Registerer) int { + logger := promslog.New(&promslog.Config{}) - cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) + cfg, err := config.LoadFile(sdConfigFiles, false, logger) if err != nil { fmt.Fprintln(os.Stderr, "Cannot load config", err) return failureExitCode @@ -114,7 +114,7 @@ outerLoop: } results := []sdCheckResult{} for _, tgs := range sdCheckResults { - results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...) + results = append(results, getSDCheckResult(tgs, scrapeConfig)...) 
} res, err := json.MarshalIndent(results, "", " ") @@ -127,7 +127,7 @@ outerLoop: return successExitCode } -func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult { +func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult { sdCheckResults := []sdCheckResult{} lb := labels.NewBuilder(labels.EmptyLabels()) for _, targetGroup := range targetGroups { @@ -144,7 +144,9 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc } } - res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort) + scrape.PopulateDiscoveredLabels(lb, scrapeConfig, target, targetGroup.Labels) + orig := lb.Labels() + res, err := scrape.PopulateLabels(lb, scrapeConfig, target, targetGroup.Labels) result := sdCheckResult{ DiscoveredLabels: orig, Labels: res, diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index cb65ee72aa..8f174a9b80 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -18,17 +18,17 @@ import ( "time" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/util/testutil" - - "github.com/stretchr/testify/require" ) func TestSDCheckResult(t *testing.T) { + t.Parallel() targetGroups := []*targetgroup.Group{{ Targets: []model.LabelSet{ map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"}, @@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) { }, } - testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true)) + testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig)) } diff --git a/cmd/promtool/testdata/bad-sd-file-extension.nonexistant b/cmd/promtool/testdata/bad-sd-file-extension.nonexistent similarity index 100% rename from cmd/promtool/testdata/bad-sd-file-extension.nonexistant rename to cmd/promtool/testdata/bad-sd-file-extension.nonexistent diff --git a/cmd/promtool/testdata/config_with_service_discovery_files.yml b/cmd/promtool/testdata/config_with_service_discovery_files.yml index 13b6d7faff..6a550a8403 100644 --- a/cmd/promtool/testdata/config_with_service_discovery_files.yml +++ b/cmd/promtool/testdata/config_with_service_discovery_files.yml @@ -6,7 +6,7 @@ scrape_configs: alerting: alertmanagers: - scheme: http - api_version: v1 + api_version: v2 file_sd_configs: - files: - nonexistent_file.yml diff --git a/cmd/promtool/testdata/features.yml b/cmd/promtool/testdata/features.yml new file mode 100644 index 0000000000..769f8362bf --- /dev/null +++ b/cmd/promtool/testdata/features.yml @@ -0,0 +1,6 @@ +groups: + - name: features + rules: + - record: x + # We don't expect anything from this, just want to check the function parses. 
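+      # sort_by_label is an experimental PromQL function, so checking this file
+      # requires --enable-feature=promql-experimental-functions (which
+      # TestCheckRulesWithFeatureFlag passes).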
+ expr: sort_by_label(up, "instance") diff --git a/cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml b/cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml new file mode 100644 index 0000000000..0c85d13f31 --- /dev/null +++ b/cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml @@ -0,0 +1,3 @@ +scrape_configs: + - job_name: too_long_scrape_interval_test + scrape_interval: 10m diff --git a/cmd/promtool/testdata/rules_extrafields.yml b/cmd/promtool/testdata/rules_extrafields.yml new file mode 100644 index 0000000000..85ef079bb8 --- /dev/null +++ b/cmd/promtool/testdata/rules_extrafields.yml @@ -0,0 +1,33 @@ +# This is the rules file. It has an extra "ownership" +# field in the second group. promtool should ignore this field +# and not return an error with --ignore-unknown-fields. + +groups: + - name: alerts + namespace: "foobar" + rules: + - alert: InstanceDown + expr: up == 0 + for: 5m + labels: + severity: page + annotations: + summary: "Instance {{ $labels.instance }} down" + description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes." + - alert: AlwaysFiring + expr: 1 + + - name: rules + ownership: + service: "test" + rules: + - record: job:test:count_over_time1m + expr: sum without(instance) (count_over_time(test[1m])) + + # A recording rule that doesn't depend on input series. + - record: fixed_data + expr: 1 + + # Subquery with default resolution test. + - record: suquery_interval_test + expr: count_over_time(up[5m:]) diff --git a/cmd/promtool/testdata/rules_run_extrafields.yml b/cmd/promtool/testdata/rules_run_extrafields.yml new file mode 100644 index 0000000000..86879fc396 --- /dev/null +++ b/cmd/promtool/testdata/rules_run_extrafields.yml @@ -0,0 +1,21 @@ +# Minimal test case to see that --ignore-unknown-fields +# is working as expected. It should not return an error +# when any extra fields are present in the rules file. +rule_files: + - rules_extrafields.yml + +evaluation_interval: 1m + + +tests: + - name: extra ownership field test + input_series: + - series: test + values: 1 + + promql_expr_test: + - expr: test + eval_time: 0 + exp_samples: + - value: 1 + labels: test diff --git a/cmd/promtool/testdata/rules_run_fuzzy.yml b/cmd/promtool/testdata/rules_run_fuzzy.yml new file mode 100644 index 0000000000..3bf4e47a45 --- /dev/null +++ b/cmd/promtool/testdata/rules_run_fuzzy.yml @@ -0,0 +1,43 @@ +# Minimal test case to see that fuzzy compare is working as expected. +# It should allow slight floating point differences through. Larger +# floating point differences should still fail. 
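+# Concretely, "slight" means within one ULP of the expected value: the fuzzy
+# comparer in unittest.go accepts x == y or math.Nextafter(x, ±Inf) == y.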
+ +evaluation_interval: 1m +fuzzy_compare: true + +tests: + - name: correct fuzzy match + input_series: + - series: test_low + values: 2.9999999999999996 + - series: test_high + values: 3.0000000000000004 + promql_expr_test: + - expr: test_low + eval_time: 0 + exp_samples: + - labels: test_low + value: 3 + - expr: test_high + eval_time: 0 + exp_samples: + - labels: test_high + value: 3 + + - name: wrong fuzzy match + input_series: + - series: test_low + values: 2.9999999999999987 + - series: test_high + values: 3.0000000000000013 + promql_expr_test: + - expr: test_low + eval_time: 0 + exp_samples: + - labels: test_low + value: 3 + - expr: test_high + eval_time: 0 + exp_samples: + - labels: test_high + value: 3 diff --git a/cmd/promtool/testdata/rules_run_no_fuzzy.yml b/cmd/promtool/testdata/rules_run_no_fuzzy.yml new file mode 100644 index 0000000000..eba201a28c --- /dev/null +++ b/cmd/promtool/testdata/rules_run_no_fuzzy.yml @@ -0,0 +1,24 @@ +# Minimal test case to see that fuzzy compare can be turned off, +# and slight floating point differences fail matching. + +evaluation_interval: 1m +fuzzy_compare: false + +tests: + - name: correct fuzzy match + input_series: + - series: test_low + values: 2.9999999999999996 + - series: test_high + values: 3.0000000000000004 + promql_expr_test: + - expr: test_low + eval_time: 0 + exp_samples: + - labels: test_low + value: 3 + - expr: test_high + eval_time: 0 + exp_samples: + - labels: test_high + value: 3 diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index ff511729ba..e2a8230902 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -69,13 +69,13 @@ tests: eval_time: 2m exp_samples: - labels: "test_histogram_repeat" - histogram: "{{count:2 sum:3 buckets:[2]}}" + histogram: "{{count:2 sum:3 counter_reset_hint:not_reset buckets:[2]}}" - expr: test_histogram_increase eval_time: 2m exp_samples: - labels: "test_histogram_increase" - histogram: "{{count:4 sum:5.6 buckets:[4]}}" + histogram: "{{count:4 sum:5.6 counter_reset_hint:not_reset buckets:[4]}}" # Ensure a value is stale as soon as it is marked as such. - expr: test_stale @@ -89,11 +89,11 @@ tests: # Ensure lookback delta is respected, when a value is missing. - expr: timestamp(test_missing) - eval_time: 5m + eval_time: 4m59s exp_samples: - value: 0 - expr: timestamp(test_missing) - eval_time: 5m1s + eval_time: 5m exp_samples: [] # Minimal test case to check edge case of a single sample. 
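# The 61 -> 60 changes below reflect range selectors becoming left-open:
# a 1h window no longer includes the sample sitting exactly on its left boundary.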
@@ -113,7 +113,7 @@ tests: - expr: count_over_time(fixed_data[1h]) eval_time: 1h exp_samples: - - value: 61 + - value: 60 - expr: timestamp(fixed_data) eval_time: 1h exp_samples: @@ -183,7 +183,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 1m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 1m10s @@ -194,7 +194,7 @@ tests: - expr: job:test:count_over_time1m eval_time: 2m exp_samples: - - value: 61 + - value: 60 labels: 'job:test:count_over_time1m{job="test"}' - expr: timestamp(job:test:count_over_time1m) eval_time: 2m59s999ms diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 2ed7244b1c..f512728ac9 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "runtime" @@ -32,7 +33,7 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/labels" @@ -60,7 +61,7 @@ type writeBenchmark struct { memprof *os.File blockprof *os.File mtxprof *os.File - logger log.Logger + logger *slog.Logger } func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error { @@ -68,7 +69,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err outPath: outPath, samplesFile: samplesFile, numMetrics: numMetrics, - logger: log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), + logger: promslog.New(&promslog.Config{}), } if b.outPath == "" { dir, err := os.MkdirTemp("", "tsdb_bench") @@ -87,9 +88,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err dir := filepath.Join(b.outPath, "storage") - l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - - st, err := tsdb.Open(dir, l, nil, &tsdb.Options{ + st, err := tsdb.Open(dir, b.logger, nil, &tsdb.Options{ RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), MinBlockDuration: int64(2 * time.Hour / time.Millisecond), }, tsdb.NewDBStats()) @@ -315,12 +314,11 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) { i := 0 for scanner.Scan() && i < n { - m := make([]labels.Label, 0, 10) - r := strings.NewReplacer("\"", "", "{", "", "}", "") s := r.Replace(scanner.Text()) labelChunks := strings.Split(s, ",") + m := make([]labels.Label, 0, len(labelChunks)) for _, labelChunk := range labelChunks { split := strings.Split(labelChunk, ":") m = append(m, labels.Label{Name: split[0], Value: split[1]}) @@ -367,25 +365,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) { fmt.Fprintf(tw, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", meta.ULID, - getFormatedTime(meta.MinTime, humanReadable), - getFormatedTime(meta.MaxTime, humanReadable), + getFormattedTime(meta.MinTime, humanReadable), + getFormattedTime(meta.MaxTime, humanReadable), time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond, meta.Stats.NumSamples, meta.Stats.NumChunks, meta.Stats.NumSeries, - getFormatedBytes(b.Size(), humanReadable), + getFormattedBytes(b.Size(), humanReadable), ) } } -func getFormatedTime(timestamp int64, humanReadable bool) string { +func getFormattedTime(timestamp int64, humanReadable bool) string { if humanReadable { return time.Unix(timestamp/1000, 0).UTC().String() } return strconv.FormatInt(timestamp, 10) } -func getFormatedBytes(bytes int64, humanReadable bool) string { +func getFormattedBytes(bytes int64, 
humanReadable bool) string { if humanReadable { return units.Base2Bytes(bytes).String() } @@ -405,7 +403,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) } } - b, err := db.Block(blockID) + b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory) if err != nil { return nil, nil, err } @@ -554,7 +552,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten postingInfos = postingInfos[:0] for _, n := range allLabelNames { - values, err := ir.SortedLabelValues(ctx, n, selectors...) + values, err := ir.SortedLabelValues(ctx, n, nil, selectors...) if err != nil { return err } @@ -570,7 +568,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten postingInfos = postingInfos[:0] for _, n := range allLabelNames { - lv, err := ir.SortedLabelValues(ctx, n, selectors...) + lv, err := ir.SortedLabelValues(ctx, n, nil, selectors...) if err != nil { return err } @@ -580,7 +578,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten printInfo(postingInfos) postingInfos = postingInfos[:0] - lv, err := ir.SortedLabelValues(ctx, "__name__", selectors...) + lv, err := ir.SortedLabelValues(ctx, "__name__", nil, selectors...) if err != nil { return err } @@ -589,7 +587,10 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten if err != nil { return err } - postings = index.Intersect(postings, index.NewListPostings(refs)) + // Only intersect postings if matchers are specified. + if len(matchers) > 0 { + postings = index.Intersect(postings, index.NewListPostings(refs)) + } count := 0 for postings.Next() { count++ @@ -662,7 +663,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb. histogramChunkSize = append(histogramChunkSize, len(chk.Bytes())) fhchk, ok := chk.(*chunkenc.FloatHistogramChunk) if !ok { - return fmt.Errorf("chunk is not FloatHistogramChunk") + return errors.New("chunk is not FloatHistogramChunk") } it := fhchk.Iterator(nil) bucketCount := 0 @@ -677,7 +678,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb. histogramChunkSize = append(histogramChunkSize, len(chk.Bytes())) hchk, ok := chk.(*chunkenc.HistogramChunk) if !ok { - return fmt.Errorf("chunk is not HistogramChunk") + return errors.New("chunk is not HistogramChunk") } it := hchk.Iterator(nil) bucketCount := 0 @@ -733,7 +734,7 @@ func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt i for _, mset := range matcherSets { sets = append(sets, q.Select(ctx, true, nil, mset...)) } - ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + ss = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) } else { ss = q.Select(ctx, false, nil, matcherSets[0]...) 
}
@@ -823,18 +824,32 @@ func checkErr(err error) int {
return 0
}
-func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
- inputFile, err := fileutil.OpenMmapFile(path)
+func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) int {
+ var buf []byte
+ info, err := os.Stat(path)
if err != nil {
return checkErr(err)
}
- defer inputFile.Close()
+ if info.Mode()&(os.ModeNamedPipe|os.ModeCharDevice) != 0 {
+ // Read the pipe chunk by chunk, as it cannot be mmap-ed.
+ buf, err = os.ReadFile(path)
+ if err != nil {
+ return checkErr(err)
+ }
+ } else {
+ inputFile, err := fileutil.OpenMmapFile(path)
+ if err != nil {
+ return checkErr(err)
+ }
+ defer inputFile.Close()
+ buf = inputFile.Bytes()
+ }
if err := os.MkdirAll(outputDir, 0o777); err != nil {
return checkErr(fmt.Errorf("create output dir: %w", err))
}
- return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration))
+ return checkErr(backfill(5000, buf, outputDir, humanReadable, quiet, maxBlockDuration, customLabels))
}
func displayHistogram(dataType string, datas []int, total int) {
@@ -866,16 +881,16 @@ func displayHistogram(dataType string, datas []int, total int) {
fmt.Println()
}
-func generateBucket(min, max int) (start, end, step int) {
- s := (max - min) / 10
+func generateBucket(minVal, maxVal int) (start, end, step int) {
+ s := (maxVal - minVal) / 10
step = 10
for step < s && step <= 10000 {
step *= 10
}
- start = min - min%step
- end = max - max%step + step
+ start = minVal - minVal%step
+ end = maxVal - maxVal%step + step
return
}
diff --git a/cmd/promtool/tsdb_posix_test.go b/cmd/promtool/tsdb_posix_test.go
new file mode 100644
index 0000000000..8a83aead70
--- /dev/null
+++ b/cmd/promtool/tsdb_posix_test.go
@@ -0,0 +1,69 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
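+// This file is excluded on Windows: the round-trip test below creates a
+// named pipe with syscall.Mkfifo, which is POSIX-only (hence the build tag).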
+ +//go:build !windows + +package main + +import ( + "bytes" + "io" + "math" + "os" + "path" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/tsdb" +) + +func TestTSDBDumpOpenMetricsRoundTripPipe(t *testing.T) { + initialMetrics, err := os.ReadFile("testdata/dump-openmetrics-roundtrip-test.prom") + require.NoError(t, err) + initialMetrics = normalizeNewLine(initialMetrics) + + pipeDir := t.TempDir() + dbDir := t.TempDir() + + // create pipe + pipe := path.Join(pipeDir, "pipe") + err = syscall.Mkfifo(pipe, 0o666) + require.NoError(t, err) + + go func() { + // open pipe to write + in, err := os.OpenFile(pipe, os.O_WRONLY, os.ModeNamedPipe) + require.NoError(t, err) + defer func() { require.NoError(t, in.Close()) }() + _, err = io.Copy(in, bytes.NewReader(initialMetrics)) + require.NoError(t, err) + }() + + // Import samples from OM format + code := backfillOpenMetrics(pipe, dbDir, false, false, 2*time.Hour, map[string]string{}) + require.Equal(t, 0, code) + db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + // Dump the blocks into OM format + dumpedMetrics := getDumpedSamples(t, dbDir, "", math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + + // Should get back the initial metrics. + require.Equal(t, string(initialMetrics), dumpedMetrics) +} diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go index 75089b168b..e745a3fe7a 100644 --- a/cmd/promtool/tsdb_test.go +++ b/cmd/promtool/tsdb_test.go @@ -20,6 +20,7 @@ import ( "math" "os" "runtime" + "slices" "strings" "testing" "time" @@ -31,6 +32,7 @@ import ( ) func TestGenerateBucket(t *testing.T) { + t.Parallel() tcs := []struct { min, max int start, end, step int @@ -54,7 +56,7 @@ func TestGenerateBucket(t *testing.T) { } // getDumpedSamples dumps samples and returns them. 
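// It captures os.Stdout while dumpSamples writes, then restores it and
// returns the captured text.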
-func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string { +func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string { t.Helper() oldStdout := os.Stdout @@ -63,8 +65,8 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin err := dumpSamples( context.Background(), - path, - t.TempDir(), + databasePath, + sandboxDirRoot, mint, maxt, match, @@ -95,13 +97,15 @@ func TestTSDBDump(t *testing.T) { heavy_metric{foo="bar"} 5 4 3 2 1 heavy_metric{foo="foo"} 5 4 3 2 1 `) + t.Cleanup(func() { storage.Close() }) tests := []struct { - name string - mint int64 - maxt int64 - match []string - expectedDump string + name string + mint int64 + maxt int64 + sandboxDirRoot string + match []string + expectedDump string }{ { name: "default match", @@ -110,6 +114,14 @@ func TestTSDBDump(t *testing.T) { match: []string{"{__name__=~'(?s:.*)'}"}, expectedDump: "testdata/dump-test-1.prom", }, + { + name: "default match with sandbox dir root set", + mint: math.MinInt64, + maxt: math.MaxInt64, + sandboxDirRoot: t.TempDir(), + match: []string{"{__name__=~'(?s:.*)'}"}, + expectedDump: "testdata/dump-test-1.prom", + }, { name: "same matcher twice", mint: math.MinInt64, @@ -148,28 +160,51 @@ func TestTSDBDump(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match, formatSeriesSet) + dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSet) expectedMetrics, err := os.ReadFile(tt.expectedDump) require.NoError(t, err) expectedMetrics = normalizeNewLine(expectedMetrics) - // even though in case of one matcher samples are not sorted, the order in the cases above should stay the same. - require.Equal(t, string(expectedMetrics), dumpedMetrics) + // Sort both, because Prometheus does not guarantee the output order. 
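+ // sortLines (defined below) puts both dumps into a stable line order first.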
+ require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) }) } } +func sortLines(buf string) string { + lines := strings.Split(buf, "\n") + slices.Sort(lines) + return strings.Join(lines, "\n") +} + func TestTSDBDumpOpenMetrics(t *testing.T) { storage := promqltest.LoadedStorage(t, ` load 1m my_counter{foo="bar", baz="abc"} 1 2 3 4 5 my_gauge{bar="foo", abc="baz"} 9 8 0 4 7 `) + t.Cleanup(func() { storage.Close() }) - expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom") - require.NoError(t, err) - expectedMetrics = normalizeNewLine(expectedMetrics) - dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) - require.Equal(t, string(expectedMetrics), dumpedMetrics) + tests := []struct { + name string + sandboxDirRoot string + }{ + { + name: "default match", + }, + { + name: "default match with sandbox dir root set", + sandboxDirRoot: t.TempDir(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom") + require.NoError(t, err) + expectedMetrics = normalizeNewLine(expectedMetrics) + dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) + }) + } } func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { @@ -179,7 +214,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { dbDir := t.TempDir() // Import samples from OM format - err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour) + err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour, map[string]string{}) require.NoError(t, err) db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) @@ -188,7 +223,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { }) // Dump the blocks into OM format - dumpedMetrics := getDumpedSamples(t, dbDir, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + dumpedMetrics := getDumpedSamples(t, dbDir, "", math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) // Should get back the initial metrics. require.Equal(t, string(initialMetrics), dumpedMetrics) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 5451c5296c..4910a0b1a6 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -18,6 +18,8 @@ import ( "encoding/json" "errors" "fmt" + "io" + "math" "os" "path/filepath" "sort" @@ -25,11 +27,11 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/google/go-cmp/cmp" "github.com/grafana/regexp" "github.com/nsf/jsondiff" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/model/histogram" @@ -39,12 +41,18 @@ import ( "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/junitxml" ) // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. 
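// It returns successExitCode only when every test file passes; the JUnit
// results of the run are discarded (RulesUnitTestResult exposes them).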
-func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { +func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int { + return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, ignoreUnknownFields, files...) +} + +func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int { failed := false + junit := &junitxml.JUnitXML{} var run *regexp.Regexp if runStrings != nil { @@ -52,7 +60,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif } for _, f := range files { - if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil { + if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, ignoreUnknownFields, junit.Suite(f)); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -64,25 +72,30 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif } fmt.Println() } + err := junit.WriteXML(results) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to write JUnit XML: %s\n", err) + } if failed { return failureExitCode } return successExitCode } -func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error { - fmt.Println("Unit Testing: ", filename) - +func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug, ignoreUnknownFields bool, ts *junitxml.TestSuite) []error { b, err := os.ReadFile(filename) if err != nil { + ts.Abort(err) return []error{err} } var unitTestInp unitTestFile if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil { + ts.Abort(err) return []error{err} } if err := resolveAndGlobFilepaths(filepath.Dir(filename), &unitTestInp); err != nil { + ts.Abort(err) return []error{err} } @@ -91,29 +104,38 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg } evalInterval := time.Duration(unitTestInp.EvaluationInterval) - + ts.Settime(time.Now().Format("2006-01-02T15:04:05")) // Giving number for groups mentioned in the file for ordering. // Lower number group should be evaluated before higher number group. groupOrderMap := make(map[string]int) for i, gn := range unitTestInp.GroupEvalOrder { if _, ok := groupOrderMap[gn]; ok { - return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)} + err := fmt.Errorf("group name repeated in evaluation order: %s", gn) + ts.Abort(err) + return []error{err} } groupOrderMap[gn] = i } // Testing. var errs []error - for _, t := range unitTestInp.Tests { + for i, t := range unitTestInp.Tests { if !matchesRun(t.TestGroupName, run) { continue } - + testname := t.TestGroupName + if testname == "" { + testname = fmt.Sprintf("unnamed#%d", i) + } + tc := ts.Case(testname) if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } - ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...) + ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.FuzzyCompare, unitTestInp.RuleFiles...) if ers != nil { + for _, e := range ers { + tc.Fail(e.Error()) + } errs = append(errs, ers...) 
} } @@ -138,6 +160,7 @@ type unitTestFile struct { EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` GroupEvalOrder []string `yaml:"group_eval_order"` Tests []testGroup `yaml:"tests"` + FuzzyCompare bool `yaml:"fuzzy_compare,omitempty"` } // resolveAndGlobFilepaths joins all relative paths in a configuration @@ -176,7 +199,14 @@ type testGroup struct { } // test performs the unit tests. -func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) { +func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields, fuzzyCompare bool, ruleFiles ...string) (outErr []error) { + if debug { + testStart := time.Now() + fmt.Printf("DEBUG: Starting test %s\n", testname) + defer func() { + fmt.Printf("DEBUG: Test %s finished, took %v\n", testname, time.Since(testStart)) + }() + } // Setup testing suite. suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts) if err != nil { @@ -195,11 +225,11 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i QueryFunc: rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), Appendable: suite.Storage(), Context: context.Background(), - NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, - Logger: log.NewNopLogger(), + NotifyFunc: func(_ context.Context, _ string, _ ...*rules.Alert) {}, + Logger: promslog.NewNopLogger(), } m := rules.NewManager(opts) - groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...) + groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ignoreUnknownFields, ruleFiles...) if ers != nil { return ers } @@ -209,6 +239,14 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i mint := time.Unix(0, 0).UTC() maxt := mint.Add(tg.maxEvalTime()) + // Optional floating point compare fuzzing. + var compareFloat64 cmp.Option = cmp.Options{} + if fuzzyCompare { + compareFloat64 = cmp.Comparer(func(x, y float64) bool { + return x == y || math.Nextafter(x, math.Inf(-1)) == y || math.Nextafter(x, math.Inf(1)) == y + }) + } + // Pre-processing some data for testing alerts. // All this preparation is so that we can test alerts as we evaluate the rules. // This avoids storing them in memory, as the number of evals might be high. @@ -283,12 +321,8 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i return errs } - for { - if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) && - time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) { - break - } - + for curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) && + time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint) { // We need to check alerts for this time. // If 'ts <= `eval_time=alertEvalTimes[curr]` < ts+evalInterval' // then we compare alerts with the Eval at `ts`. 
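// Equivalently: consume every alertEvalTimes entry that falls in the
// half-open window [ts-mint, ts+evalInterval-mint).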
@@ -346,7 +380,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i sort.Sort(gotAlerts) sort.Sort(expAlerts) - if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal)) { + if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal), compareFloat64) { var testName string if tg.TestGroupName != "" { testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName) @@ -454,12 +488,38 @@ Outer: sort.Slice(gotSamples, func(i, j int) bool { return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0 }) - if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal)) { + if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal), compareFloat64) { errs = append(errs, fmt.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr, testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples))) } } + if debug { + ts := tg.maxEvalTime() + // Potentially a test can be specified at a time with fractional seconds, + // which PromQL cannot represent, so round up to the next whole second. + ts = (ts + time.Second).Truncate(time.Second) + expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts) + q, err := suite.QueryEngine().NewInstantQuery(context.Background(), suite.Queryable(), nil, expr, mint.Add(ts)) + if err != nil { + fmt.Printf("DEBUG: Failed querying, expr: %q, err: %v\n", expr, err) + return errs + } + res := q.Exec(suite.Context()) + if res.Err != nil { + fmt.Printf("DEBUG: Failed query exec, expr: %q, err: %v\n", expr, res.Err) + return errs + } + switch v := res.Value.(type) { + case promql.Matrix: + fmt.Printf("DEBUG: Dump of all data (input_series and rules) at %v:\n", ts) + fmt.Println(v.String()) + default: + fmt.Printf("DEBUG: Got unexpected type %T\n", v) + return errs + } + } + if len(errs) > 0 { return errs } diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 2dbd5a4e51..566e0acbc6 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -14,14 +14,19 @@ package main import ( + "bytes" + "encoding/xml" + "fmt" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/promql/promqltest" + "github.com/prometheus/prometheus/util/junitxml" ) func TestRulesUnitTest(t *testing.T) { + t.Parallel() type args struct { files []string } @@ -125,25 +130,75 @@ func TestRulesUnitTest(t *testing.T) { want: 0, }, } + reuseFiles := []string{} + reuseCount := [2]int{} for _, tt := range tests { + if (tt.queryOpts == promqltest.LazyLoaderOpts{ + EnableNegativeOffset: true, + } || tt.queryOpts == promqltest.LazyLoaderOpts{ + EnableAtModifier: true, + }) { + reuseFiles = append(reuseFiles, tt.args.files...) 
+ reuseCount[tt.want] += len(tt.args.files) + } t.Run(tt.name, func(t *testing.T) { - if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want { + t.Parallel() + if got := RulesUnitTest(tt.queryOpts, nil, false, false, false, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) } + t.Run("Junit xml output ", func(t *testing.T) { + t.Parallel() + var buf bytes.Buffer + if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, false, reuseFiles...); got != 1 { + t.Errorf("RulesUnitTestResults() = %v, want 1", got) + } + var test junitxml.JUnitXML + output := buf.Bytes() + err := xml.Unmarshal(output, &test) + if err != nil { + fmt.Println("error in decoding XML:", err) + return + } + var total int + var passes int + var failures int + var cases int + total = len(test.Suites) + if total != len(reuseFiles) { + t.Errorf("JUnit output had %d testsuite elements; expected %d\n", total, len(reuseFiles)) + } + + for _, i := range test.Suites { + if i.FailureCount == 0 { + passes++ + } else { + failures++ + } + cases += len(i.Cases) + } + if total != passes+failures { + t.Errorf("JUnit output mismatch: Total testsuites (%d) does not equal the sum of passes (%d) and failures (%d).", total, passes, failures) + } + if cases < total { + t.Errorf("JUnit output had %d suites without test cases\n", total-cases) + } + }) } func TestRulesUnitTestRun(t *testing.T) { + t.Parallel() type args struct { run []string files []string } tests := []struct { - name string - args args - queryOpts promqltest.LazyLoaderOpts - want int + name string + args args + queryOpts promqltest.LazyLoaderOpts + want int + ignoreUnknownFields bool }{ { name: "Test all without run arg", @@ -177,10 +232,42 @@ func TestRulesUnitTestRun(t *testing.T) { }, want: 1, }, + { + name: "Test all with extra fields", + args: args{ + files: []string{"./testdata/rules_run_extrafields.yml"}, + }, + ignoreUnknownFields: true, + want: 0, + }, + { + name: "Test precise floating point comparison expected failure", + args: args{ + files: []string{"./testdata/rules_run_no_fuzzy.yml"}, + }, + want: 1, + }, + { + name: "Test fuzzy floating point comparison correct match", + args: args{ + run: []string{"correct"}, + files: []string{"./testdata/rules_run_fuzzy.yml"}, + }, + want: 0, + }, + { + name: "Test fuzzy floating point comparison wrong match", + args: args{ + run: []string{"wrong"}, + files: []string{"./testdata/rules_run_fuzzy.yml"}, + }, + want: 1, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...) + t.Parallel() + got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.ignoreUnknownFields, tt.args.files...) 
require.Equal(t, tt.want, got) }) } diff --git a/config/config.go b/config/config.go index c924e30989..12ca828ae8 100644 --- a/config/config.go +++ b/config/config.go @@ -16,27 +16,29 @@ package config import ( "errors" "fmt" + "log/slog" + "mime" "net/url" "os" "path/filepath" + "slices" "sort" "strconv" "strings" "time" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/common/sigv4" + "github.com/prometheus/sigv4" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/storage/remote/azuread" + "github.com/prometheus/prometheus/storage/remote/googleiam" ) var ( @@ -67,7 +69,7 @@ var ( ) // Load parses the YAML input s into a Config. -func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func Load(s string, logger *slog.Logger) (*Config, error) { cfg := &Config{} // If the entire config body is empty the UnmarshalYAML method is // never called. We thus have to set the DefaultConfig at the entry @@ -79,10 +81,6 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return nil, err } - if !expandExternalLabels { - return cfg, nil - } - b := labels.NewScratchBuilder(0) cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { @@ -92,26 +90,41 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro if v := os.Getenv(s); v != "" { return v } - level.Warn(logger).Log("msg", "Empty environment variable", "name", s) + logger.Warn("Empty environment variable", "name", s) return "" }) if newV != v.Value { - level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) + logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV) } // Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024 b.Add(v.Name, newV) }) - cfg.GlobalConfig.ExternalLabels = b.Labels() + if !b.Labels().IsEmpty() { + cfg.GlobalConfig.ExternalLabels = b.Labels() + } + + switch cfg.OTLPConfig.TranslationStrategy { + case UnderscoreEscapingWithSuffixes: + case "": + case NoTranslation, NoUTF8EscapingWithSuffixes: + if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation { + return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy) + } + default: + return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy) + } + cfg.loaded = true return cfg, nil } -// LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { +// LoadFile parses and validates the given YAML file into a read-only Config. +// Callers should never write to or shallow copy the returned Config. 
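+// Only configs created by Load or LoadFile are marked as loaded, which
+// GetScrapeConfigs later requires.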
+func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) { content, err := os.ReadFile(filename) if err != nil { return nil, err } - cfg, err := Load(string(content), expandExternalLabels, logger) + cfg, err := Load(string(content), logger) if err != nil { return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err) } @@ -139,6 +152,8 @@ var ( // DefaultConfig is the default top-level configuration. DefaultConfig = Config{ GlobalConfig: DefaultGlobalConfig, + Runtime: DefaultRuntimeConfig, + OTLPConfig: DefaultOTLPConfig, } // DefaultGlobalConfig is the default global configuration. @@ -149,24 +164,30 @@ var ( RuleQueryOffset: model.Duration(0 * time.Minute), // When native histogram feature flag is enabled, ScrapeProtocols default // changes to DefaultNativeHistogramScrapeProtocols. - ScrapeProtocols: DefaultScrapeProtocols, + ScrapeProtocols: DefaultScrapeProtocols, + ConvertClassicHistogramsToNHCB: false, + AlwaysScrapeClassicHistograms: false, + MetricNameValidationScheme: model.UTF8Validation, + MetricNameEscapingScheme: model.AllowUTF8, } DefaultRuntimeConfig = RuntimeConfig{ // Go runtime tuning. - GoGC: 75, + GoGC: getGoGC(), } - // DefaultScrapeConfig is the default scrape configuration. + // DefaultScrapeConfig is the default scrape configuration. Users of this + // default MUST call Validate() on the config after creation, even if it's + // used unaltered, to check for parameter correctness and fill out default + // values that can't be set inline in this declaration. DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. - ScrapeClassicHistograms: false, - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - EnableCompression: true, + // ScrapeTimeout, ScrapeInterval, ScrapeProtocols, AlwaysScrapeClassicHistograms, and ConvertClassicHistogramsToNHCB default to the configured globals. + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -177,13 +198,18 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } + DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: false, + } + // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, - HTTPClientConfig: config.DefaultHTTPClientConfig, + HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig, } // DefaultQueueConfig is the default remote queue configuration. @@ -215,6 +241,7 @@ var ( // DefaultRemoteReadConfig is the default remote read configuration. DefaultRemoteReadConfig = RemoteReadConfig{ RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, HTTPClientConfig: config.DefaultHTTPClientConfig, FilterExternalLabels: true, } @@ -227,6 +254,11 @@ var ( DefaultExemplarsConfig = ExemplarsConfig{ MaxExemplars: 100000, } + + // DefaultOTLPConfig is the default OTLP configuration. + DefaultOTLPConfig = OTLPConfig{ + TranslationStrategy: UnderscoreEscapingWithSuffixes, + } ) // Config is the top-level configuration for Prometheus's config files. 
@@ -242,9 +274,13 @@ type Config struct {
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
+ OTLPConfig OTLPConfig `yaml:"otlp,omitempty"`
+
+ loaded bool // Set by Load/LoadFile; certain methods require a config validated this way.
}
// SetDirectory joins any relative file paths with dir.
+// This method writes to the config, and it's not concurrency safe.
func (c *Config) SetDirectory(dir string) {
c.GlobalConfig.SetDirectory(dir)
c.AlertingConfig.SetDirectory(dir)
@@ -274,24 +310,26 @@ func (c Config) String() string {
return string(b)
}
-// GetScrapeConfigs returns the scrape configurations.
+// GetScrapeConfigs returns the read-only, validated scrape configurations including
+// the ones from the scrape_config_files.
+// This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency).
+// This method also assumes the Config was created by the Load or LoadFile function; it returns an
+// error if it was not. We can't re-validate or apply globals here due to races;
+// see https://github.com/prometheus/prometheus/issues/15538 for details.
func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
- scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
+ if !c.loaded {
+ // Programmatic error: fail early here, before the missing global defaults cause more confusing errors elsewhere.
+ return nil, errors.New("scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen")
+ }
+ scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
jobNames := map[string]string{}
for i, scfg := range c.ScrapeConfigs {
- // We do these checks for library users that would not call validate in
- // Unmarshal.
- if err := scfg.Validate(c.GlobalConfig); err != nil {
- return nil, err
- }
-
- if _, ok := jobNames[scfg.JobName]; ok {
- return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
- }
jobNames[scfg.JobName] = "main config file"
scfgs[i] = scfg
}
+
+ // Re-read and validate the dynamic scrape config rules.
for _, pat := range c.ScrapeConfigFiles {
fs, err := filepath.Glob(pat)
if err != nil {
@@ -327,6 +365,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
+// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultConfig
// We want to set c to the defaults and then overwrite it with the input.
@@ -347,8 +386,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
// We have to restore it here.
if c.Runtime.isZero() {
c.Runtime = DefaultRuntimeConfig
- // Use the GOGC env var value if the runtime section is empty.
- c.Runtime.GoGC = getGoGCEnv()
}
for _, rf := range c.RuleFiles {
@@ -363,18 +400,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
}
- // Do global overrides and validate unique names.
+ // Do global overrides and validation.
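+ // Each scrape config is validated against the global section here, and
+ // duplicate job names are rejected.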
@@ -363,18 +400,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		}
 	}

-	// Do global overrides and validate unique names.
+	// Do global overrides and validation.
 	jobNames := map[string]struct{}{}
 	for _, scfg := range c.ScrapeConfigs {
 		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return err
 		}
-
 		if _, ok := jobNames[scfg.JobName]; ok {
 			return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
 		}
 		jobNames[scfg.JobName] = struct{}{}
 	}
+
 	rwNames := map[string]struct{}{}
 	for _, rwcfg := range c.RemoteWriteConfigs {
 		if rwcfg == nil {
@@ -418,6 +455,8 @@ type GlobalConfig struct {
 	RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"`
 	// File to which PromQL queries are logged.
 	QueryLogFile string `yaml:"query_log_file,omitempty"`
+	// File to which scrape failures are logged.
+	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
 	// The labels to add to any timeseries that this Prometheus instance scrapes.
 	ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
 	// An uncompressed response body larger than this many bytes will cause the
@@ -441,6 +480,17 @@ type GlobalConfig struct {
 	// Keep no more than this many dropped targets per job.
 	// 0 means no limit.
 	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+	// Allow UTF-8 metric and label names. Can be blank in config files but must
+	// have a value if a GlobalConfig is created programmatically.
+	MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
+	// Metric name escaping mode to request through content negotiation. Can be
+	// blank in config files but must have a value if a GlobalConfig is created
+	// programmatically.
+	MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
+	// Whether to convert all scraped classic histograms into native histograms with custom buckets.
+	ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
+	// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
+	AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
 }

 // ScrapeProtocol represents supported protocol for scraping metrics.
@@ -461,15 +511,30 @@ func (s ScrapeProtocol) Validate() error {
 	return nil
 }
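Since the new validation and escaping fields may be blank in YAML but must be set when a GlobalConfig is built in code, a programmatic construction might look like this sketch (values mirror DefaultGlobalConfig above):

package main

import (
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
)

func main() {
	_ = config.GlobalConfig{
		ScrapeInterval: model.Duration(1 * time.Minute),
		ScrapeTimeout:  model.Duration(10 * time.Second),
		// Required when building the struct in code; YAML may leave them blank.
		MetricNameValidationScheme: model.UTF8Validation,
		MetricNameEscapingScheme:   model.AllowUTF8,
		// Histogram toggles that scrape configs inherit unless they override them.
		ConvertClassicHistogramsToNHCB: false,
		AlwaysScrapeClassicHistograms:  false,
	}
}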
+// HeaderMediaType returns the MIME media type for a particular ScrapeProtocol.
+func (s ScrapeProtocol) HeaderMediaType() string {
+	if _, ok := ScrapeProtocolsHeaders[s]; !ok {
+		return ""
+	}
+	mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s])
+	if err != nil {
+		return ""
+	}
+	return mediaType
+}
+
 var (
 	PrometheusProto      ScrapeProtocol = "PrometheusProto"
 	PrometheusText0_0_4  ScrapeProtocol = "PrometheusText0.0.4"
+	PrometheusText1_0_0  ScrapeProtocol = "PrometheusText1.0.0"
 	OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
 	OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
+	UTF8NamesHeader      string         = model.EscapingKey + "=" + model.AllowUTF8

 	ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
 		PrometheusProto:      "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
 		PrometheusText0_0_4:  "text/plain;version=0.0.4",
+		PrometheusText1_0_0:  "text/plain;version=1.0.0",
 		OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
 		OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
 	}
@@ -479,6 +544,7 @@ var (
 	DefaultScrapeProtocols = []ScrapeProtocol{
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
+		PrometheusText1_0_0,
 		PrometheusText0_0_4,
 	}
@@ -490,6 +556,7 @@ var (
 		PrometheusProto,
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
+		PrometheusText1_0_0,
 		PrometheusText0_0_4,
 	}
 )
@@ -515,6 +582,7 @@ func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error {
 // SetDirectory joins any relative file paths with dir.
 func (c *GlobalConfig) SetDirectory(dir string) {
 	c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
+	c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -577,13 +645,33 @@ func (c *GlobalConfig) isZero() bool {
 		c.EvaluationInterval == 0 &&
 		c.RuleQueryOffset == 0 &&
 		c.QueryLogFile == "" &&
-		c.ScrapeProtocols == nil
+		c.ScrapeFailureLogFile == "" &&
+		c.ScrapeProtocols == nil &&
+		!c.ConvertClassicHistogramsToNHCB &&
+		!c.AlwaysScrapeClassicHistograms
 }

+const DefaultGoGCPercentage = 75
+
 // RuntimeConfig configures the values for the process behavior.
 type RuntimeConfig struct {
 	// The Go garbage collection target percentage.
 	GoGC int `yaml:"gogc,omitempty"`
+
+	// Below are guidelines for adding a new field:
+	//
+	// For config that shouldn't change after startup, you might want to use
+	// flags https://prometheus.io/docs/prometheus/latest/command-line/prometheus/.
+	//
+	// Consider when the new field is first applied: at the very beginning of instance
+	// startup, after the TSDB is loaded, etc. See https://github.com/prometheus/prometheus/pull/16491
+	// for an example.
+	//
+	// Provide a test covering various scenarios: an empty config file, an empty or incomplete runtime
+	// config block, precedence over other inputs (e.g., env vars, if applicable), etc.
+	// See TestRuntimeGOGCConfig (or https://github.com/prometheus/prometheus/pull/15238).
+	// The test should also verify behavior on reloads, since this config should be
+	// adjustable at runtime.
 }

 // isZero returns true iff the global config is the zero value.
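A usage sketch for HeaderMediaType: it strips the parameters from the registered Content-Type, which is handy when comparing a scrape response's media type against the negotiated protocol.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Parameters like version= and proto= are dropped; only the media type remains.
	fmt.Println(config.PrometheusProto.HeaderMediaType())     // application/vnd.google.protobuf
	fmt.Println(config.PrometheusText1_0_0.HeaderMediaType()) // text/plain
	// Unknown protocols yield the empty string rather than an error.
	fmt.Println(config.ScrapeProtocol("bogus").HeaderMediaType() == "") // true
}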
@@ -614,10 +702,19 @@ type ScrapeConfig struct {
 	// The protocols to negotiate during a scrape. It tells clients what
 	// protocol are accepted by Prometheus and with what preference (most wanted is first).
 	// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
-	// OpenMetricsText1.0.0, PrometheusText0.0.4.
+	// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
 	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
-	// Whether to scrape a classic histogram that is also exposed as a native histogram.
-	ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
+	// The fallback protocol to use if the Content-Type provided by the target
+	// is missing, blank, or not one of the expected values.
+	// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+	// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
+	ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
+	// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
+	AlwaysScrapeClassicHistograms *bool `yaml:"always_scrape_classic_histograms,omitempty"`
+	// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
+	ConvertClassicHistogramsToNHCB *bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
+	// File to which scrape failures are logged.
+	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
 	// The HTTP resource path on which to fetch metrics from targets.
 	MetricsPath string `yaml:"metrics_path,omitempty"`
 	// The URL scheme with which to fetch metrics from targets.
@@ -651,6 +748,13 @@ type ScrapeConfig struct {
 	// Keep no more than this many dropped targets per job.
 	// 0 means no limit.
 	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+	// Allow UTF-8 metric and label names. Can be blank in config files but must
+	// have a value if a ScrapeConfig is created programmatically.
+	MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
+	// Metric name escaping mode to request through content negotiation. Can be
+	// blank in config files but must have a value if a ScrapeConfig is created
+	// programmatically.
+	MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`

 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -668,6 +772,7 @@ type ScrapeConfig struct {
 func (c *ScrapeConfig) SetDirectory(dir string) {
 	c.ServiceDiscoveryConfigs.SetDirectory(dir)
 	c.HTTPClientConfig.SetDirectory(dir)
+	c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile)
 }
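The two histogram toggles are pointers so that "unset in YAML" can be told apart from an explicit false and filled from the globals by Validate. A sketch of setting them in code, assuming a recent prometheus/common where UTF-8 name validation is the process default (boolPtr is a small local helper, as used in the tests later in this patch):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func boolPtr(b bool) *bool { return &b }

func main() {
	sc := config.DefaultScrapeConfig // copy of the default
	sc.JobName = "demo"
	sc.ScrapeFallbackProtocol = config.PrometheusText0_0_4
	sc.ConvertClassicHistogramsToNHCB = boolPtr(true)
	// AlwaysScrapeClassicHistograms stays nil: Validate fills it from the globals.
	if err := sc.Validate(config.DefaultGlobalConfig); err != nil {
		panic(err)
	}
	fmt.Println(sc.ConvertClassicHistogramsToNHCBEnabled()) // true
	fmt.Println(sc.AlwaysScrapeClassicHistogramsEnabled())  // false (inherited)
}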
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -749,6 +854,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 	if c.KeepDroppedTargets == 0 {
 		c.KeepDroppedTargets = globalConfig.KeepDroppedTargets
 	}
+	if c.ScrapeFailureLogFile == "" {
+		c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile
+	}

 	if c.ScrapeProtocols == nil {
 		c.ScrapeProtocols = globalConfig.ScrapeProtocols
@@ -757,6 +865,70 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 		return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
 	}

+	if c.ScrapeFallbackProtocol != "" {
+		if err := c.ScrapeFallbackProtocol.Validate(); err != nil {
+			return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err)
+		}
+	}
+
+	//nolint:staticcheck
+	if model.NameValidationScheme != model.UTF8Validation {
+		return errors.New("model.NameValidationScheme must be set to UTF8")
+	}
+
+	switch globalConfig.MetricNameValidationScheme {
+	case model.UnsetValidation:
+		globalConfig.MetricNameValidationScheme = model.UTF8Validation
+	case model.LegacyValidation, model.UTF8Validation:
+	default:
+		return fmt.Errorf("unknown global name validation method specified, must be either '', 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
+	}
+	// The ScrapeConfig validation scheme matches the global one if left blank.
+	switch c.MetricNameValidationScheme {
+	case model.UnsetValidation:
+		c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
+	case model.LegacyValidation, model.UTF8Validation:
+	default:
+		return fmt.Errorf("unknown scrape config name validation method specified, must be either '', 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme)
+	}
+
+	// The escaping scheme is derived from the validation scheme if left blank.
+	switch globalConfig.MetricNameEscapingScheme {
+	case "":
+		if globalConfig.MetricNameValidationScheme == model.LegacyValidation {
+			globalConfig.MetricNameEscapingScheme = model.EscapeUnderscores
+		} else {
+			globalConfig.MetricNameEscapingScheme = model.AllowUTF8
+		}
+	case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+	default:
+		return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameEscapingScheme)
+	}
+
+	if c.MetricNameEscapingScheme == "" {
+		c.MetricNameEscapingScheme = globalConfig.MetricNameEscapingScheme
+	}
+
+	switch c.MetricNameEscapingScheme {
+	case model.AllowUTF8:
+		if c.MetricNameValidationScheme != model.UTF8Validation {
+			return errors.New("utf8 metric names requested but validation scheme is not set to UTF8")
+		}
+	case model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+	default:
+		return fmt.Errorf("unknown scrape config name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, c.MetricNameEscapingScheme)
+	}
+
+	if c.ConvertClassicHistogramsToNHCB == nil {
+		global := globalConfig.ConvertClassicHistogramsToNHCB
+		c.ConvertClassicHistogramsToNHCB = &global
+	}
+
+	if c.AlwaysScrapeClassicHistograms == nil {
+		global := globalConfig.AlwaysScrapeClassicHistograms
+		c.AlwaysScrapeClassicHistograms = &global
+	}
+
 	return nil
 }

@@ -765,6 +937,35 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
 	return discovery.MarshalYAMLWithInlineConfigs(c)
 }
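The precedence these Validate rules implement, probed in a small sketch under the same UTF-8-default assumption as above: scrape-level settings fall back to the globals, and a blank escaping scheme is derived from the validation scheme.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
)

func main() {
	g := config.DefaultGlobalConfig
	g.MetricNameValidationScheme = model.LegacyValidation
	g.MetricNameEscapingScheme = "" // derived from the validation scheme in Validate

	sc := config.DefaultScrapeConfig
	sc.JobName = "legacy-job"
	if err := sc.Validate(g); err != nil {
		panic(err)
	}
	// Both settings were blank on the scrape config and fell back to the
	// (derived) globals: legacy validation implies underscore escaping.
	fmt.Println(sc.MetricNameValidationScheme, sc.MetricNameEscapingScheme)
}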
+// ToEscapingScheme wraps the equivalent common library function with the
+// desired default behavior based on the given validation scheme. This is a
+// workaround for third-party exporters that don't set the escaping scheme.
+func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme, error) {
+	if s == "" {
+		switch v {
+		case model.UTF8Validation:
+			return model.NoEscaping, nil
+		case model.LegacyValidation:
+			return model.UnderscoreEscaping, nil
+		case model.UnsetValidation:
+			return model.NoEscaping, fmt.Errorf("v is unset: %s", v)
+		default:
+			panic(fmt.Errorf("unhandled validation scheme: %s", v))
+		}
+	}
+	return model.ToEscapingScheme(s)
+}
+
+// ConvertClassicHistogramsToNHCBEnabled returns whether to convert classic histograms to NHCB.
+func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool {
+	return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB
+}
+
+// AlwaysScrapeClassicHistogramsEnabled returns whether to always scrape classic histograms.
+func (c *ScrapeConfig) AlwaysScrapeClassicHistogramsEnabled() bool {
+	return c.AlwaysScrapeClassicHistograms != nil && *c.AlwaysScrapeClassicHistograms
+}
+
 // StorageConfig configures runtime reloadable configuration options.
 type StorageConfig struct {
 	TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"`
@@ -919,6 +1120,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {
 // AlertmanagerAPIVersion represents a version of the
 // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'.
+// 'v1' is no longer supported.
 type AlertmanagerAPIVersion string

 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -929,13 +1131,11 @@ func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error
 		return err
 	}

-	for _, supportedVersion := range SupportedAlertmanagerAPIVersions {
-		if *v == supportedVersion {
-			return nil
-		}
+	if !slices.Contains(SupportedAlertmanagerAPIVersions, *v) {
+		return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v)
 	}

-	return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v)
+	return nil
 }

 const (
@@ -948,7 +1148,7 @@ const (
 )

 var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{
-	AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2,
+	AlertmanagerAPIVersionV2,
 }

 // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.
@@ -1000,7 +1200,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
 		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil

 	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+		return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
 	}

 	// Check for users putting URLs in target groups.
@@ -1085,8 +1285,9 @@ func (m RemoteWriteProtoMsgs) String() string {
 }

 var (
-	// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
-	// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+	// RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
+	// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
+	// which will eventually be deprecated.
 	//
 	// NOTE: This string is used for both HTTP header values and config value, so don't change
 	// this reference.
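A sketch of the wrapper's fallback behavior, assuming the signatures above: an empty scheme resolves from the validation scheme instead of erroring the way the plain model.ToEscapingScheme would.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// Blank scheme: derived from the validation scheme instead of an error.
	s, err := config.ToEscapingScheme("", model.LegacyValidation)
	fmt.Println(s, err) // underscore escaping, nil

	s, err = config.ToEscapingScheme("", model.UTF8Validation)
	fmt.Println(s, err) // no escaping, nil

	// Non-blank values are delegated to the common library as-is.
	s, err = config.ToEscapingScheme(model.EscapeDots, model.UTF8Validation)
	fmt.Println(s, err)
}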
@@ -1108,6 +1309,7 @@ type RemoteWriteConfig struct {
 	Name                 string `yaml:"name,omitempty"`
 	SendExemplars        bool   `yaml:"send_exemplars,omitempty"`
 	SendNativeHistograms bool   `yaml:"send_native_histograms,omitempty"`
+	RoundRobinDNS        bool   `yaml:"round_robin_dns,omitempty"`
 	// ProtobufMessage specifies the protobuf message to use against the remote
 	// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
 	ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
@@ -1119,6 +1321,7 @@ type RemoteWriteConfig struct {
 	MetadataConfig   MetadataConfig         `yaml:"metadata_config,omitempty"`
 	SigV4Config      *sigv4.SigV4Config     `yaml:"sigv4,omitempty"`
 	AzureADConfig    *azuread.AzureADConfig `yaml:"azuread,omitempty"`
+	GoogleIAMConfig  *googleiam.Config      `yaml:"google_iam,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.
@@ -1156,17 +1359,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		return err
 	}

-	httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
-		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
+	return validateAuthConfigs(c)
+}

-	if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam is configured.
+func validateAuthConfigs(c *RemoteWriteConfig) error {
+	var authConfigured []string
+	if c.HTTPClientConfig.BasicAuth != nil {
+		authConfigured = append(authConfigured, "basic_auth")
 	}
-
-	if c.SigV4Config != nil && c.AzureADConfig != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+	if c.HTTPClientConfig.Authorization != nil {
+		authConfigured = append(authConfigured, "authorization")
+	}
+	if c.HTTPClientConfig.OAuth2 != nil {
+		authConfigured = append(authConfigured, "oauth2")
+	}
+	if c.SigV4Config != nil {
+		authConfigured = append(authConfigured, "sigv4")
+	}
+	if c.AzureADConfig != nil {
+		authConfigured = append(authConfigured, "azuread")
+	}
+	if c.GoogleIAMConfig != nil {
+		authConfigured = append(authConfigured, "google_iam")
+	}
+	if len(authConfigured) > 1 {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured)
 	}
-
 	return nil
 }

@@ -1185,7 +1404,7 @@ func validateHeadersForTracing(headers map[string]string) error {
 func validateHeaders(headers map[string]string) error {
 	for header := range headers {
 		if strings.ToLower(header) == "authorization" {
-			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
+			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter")
 		}
 		if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
 			return fmt.Errorf("%s is a reserved header. It must not be changed", header)
@@ -1233,13 +1452,20 @@ type MetadataConfig struct {
 	MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
 }
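A sketch of how the new, list-based check reads when it trips, fed a YAML snippet with two auth methods; the error now names the offenders instead of just listing the options.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/config"
)

func main() {
	raw := `
url: http://example.org/api/v1/write
basic_auth:
  username: user
  password: pass
sigv4:
  region: us-east-1
`
	var rw config.RemoteWriteConfig
	err := yaml.UnmarshalStrict([]byte(raw), &rw)
	fmt.Println(err)
	// expected: at most one of basic_auth, authorization, oauth2, sigv4,
	// azuread or google_iam must be configured. Currently configured: [basic_auth sigv4]
}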
+const (
+	// DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame the client allows.
+	// 50MB is the default. This is equivalent to ~100k full XOR chunks with an average labelset.
+	DefaultChunkedReadLimit = 5e+7
+)
+
 // RemoteReadConfig is the configuration for reading from remote storage.
 type RemoteReadConfig struct {
-	URL           *config.URL       `yaml:"url"`
-	RemoteTimeout model.Duration    `yaml:"remote_timeout,omitempty"`
-	Headers       map[string]string `yaml:"headers,omitempty"`
-	ReadRecent    bool              `yaml:"read_recent,omitempty"`
-	Name          string            `yaml:"name,omitempty"`
+	URL              *config.URL       `yaml:"url"`
+	RemoteTimeout    model.Duration    `yaml:"remote_timeout,omitempty"`
+	ChunkedReadLimit uint64            `yaml:"chunked_read_limit,omitempty"`
+	Headers          map[string]string `yaml:"headers,omitempty"`
+	ReadRecent       bool              `yaml:"read_recent,omitempty"`
+	Name             string            `yaml:"name,omitempty"`

 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -1289,7 +1515,7 @@ func fileErr(filename string, err error) error {
 	return fmt.Errorf("%q: %w", filePath(filename), err)
 }

-func getGoGCEnv() int {
+func getGoGC() int {
 	goGCEnv := os.Getenv("GOGC")
 	// If the GOGC env var is set, use the same logic as upstream Go.
 	if goGCEnv != "" {
@@ -1302,5 +1528,93 @@
 		return i
 	}
 	}
-	return DefaultRuntimeConfig.GoGC
+	return DefaultGoGCPercentage
+}
+
+type translationStrategyOption string
+
+var (
+	// NoUTF8EscapingWithSuffixes will accept metric/label names as they are.
+	// Unit and type suffixes may be added to metric names, according to certain rules.
+	NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
+	// UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
+	// This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores,
+	// and label name characters that are not alphanumerics/underscores to underscores.
+	// Unit and type suffixes may be appended to metric names, according to certain rules.
+	UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
+	// NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
+	// and label names. This offers a way for OTLP users to use native metric names, reducing confusion.
+	//
+	// WARNING: This setting has significant known risks and limitations (see
+	// https://prometheus.io/docs/practices/naming/ for details):
+	// * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration).
+	// * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed
+	//   time series. For instance, you may end up in a situation of ingesting `foo.bar` series with unit
+	//   `seconds` and a separate series `foo.bar` with unit `milliseconds`.
+	//
+	// As a result, this setting is experimental and currently should not be used in
+	// production systems.
+	//
+	// TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks.
+	NoTranslation translationStrategyOption = "NoTranslation"
+)
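Illustrative only, not part of the patch: roughly how the three strategies treat an OTLP metric name. The exact suffixing rules live in the OTLP translation code and are simplified here.

package main

import "fmt"

func main() {
	// Hypothetical OTLP metric "http.server.duration" with unit "s".
	fmt.Println("UnderscoreEscapingWithSuffixes:", "http_server_duration_seconds") // dots -> underscores, unit suffix appended
	fmt.Println("NoUTF8EscapingWithSuffixes:    ", "http.server.duration_seconds") // name kept as-is, suffix still appended
	fmt.Println("NoTranslation:                 ", "http.server.duration")         // experimental: nothing changes
}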
+// OTLPConfig is the configuration for writing to the OTLP endpoint.
+type OTLPConfig struct {
+	PromoteAllResourceAttributes      bool                      `yaml:"promote_all_resource_attributes,omitempty"`
+	PromoteResourceAttributes         []string                  `yaml:"promote_resource_attributes,omitempty"`
+	IgnoreResourceAttributes          []string                  `yaml:"ignore_resource_attributes,omitempty"`
+	TranslationStrategy               translationStrategyOption `yaml:"translation_strategy,omitempty"`
+	KeepIdentifyingResourceAttributes bool                      `yaml:"keep_identifying_resource_attributes,omitempty"`
+	ConvertHistogramsToNHCB           bool                      `yaml:"convert_histograms_to_nhcb,omitempty"`
+	// PromoteScopeMetadata controls whether to promote OTel scope metadata (i.e. name, version, schema URL, and attributes) to metric labels.
+	// As per OTel spec, the aforementioned scope metadata should be identifying, i.e. made into metric labels.
+	PromoteScopeMetadata bool `yaml:"promote_scope_metadata,omitempty"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultOTLPConfig
+	type plain OTLPConfig
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+
+	if c.PromoteAllResourceAttributes {
+		if len(c.PromoteResourceAttributes) > 0 {
+			return errors.New("'promote_all_resource_attributes' and 'promote_resource_attributes' cannot be configured simultaneously")
+		}
+		if err := sanitizeAttributes(c.IgnoreResourceAttributes, "ignored"); err != nil {
+			return fmt.Errorf("invalid 'ignore_resource_attributes': %w", err)
+		}
+	} else {
+		if len(c.IgnoreResourceAttributes) > 0 {
+			return errors.New("'ignore_resource_attributes' cannot be configured unless 'promote_all_resource_attributes' is true")
+		}
+		if err := sanitizeAttributes(c.PromoteResourceAttributes, "promoted"); err != nil {
+			return fmt.Errorf("invalid 'promote_resource_attributes': %w", err)
+		}
+	}
+
+	return nil
+}
+
+func sanitizeAttributes(attributes []string, adjective string) error {
+	seen := map[string]struct{}{}
+	var err error
+	for i, attr := range attributes {
+		attr = strings.TrimSpace(attr)
+		if attr == "" {
+			err = errors.Join(err, fmt.Errorf("empty %s OTel resource attribute", adjective))
+			continue
+		}
+		if _, exists := seen[attr]; exists {
+			err = errors.Join(err, fmt.Errorf("duplicated %s OTel resource attribute %q", adjective, attr))
+			continue
+		}
+
+		seen[attr] = struct{}{}
+		attributes[i] = attr
+	}
+	return err
+}
diff --git a/config/config_default_test.go b/config/config_default_test.go
index 31133f1e04..e5f43e1f50 100644
--- a/config/config_default_test.go
+++ b/config/config_default_test.go
@@ -18,8 +18,11 @@ package config
 const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"

 var ruleFilesExpectedConf = &Config{
+	loaded: true,
+
 	GlobalConfig: DefaultGlobalConfig,
 	Runtime:      DefaultRuntimeConfig,
+	OTLPConfig:   DefaultOTLPConfig,
 	RuleFiles: []string{
 		"testdata/first.rules",
 		"testdata/rules/second.rules",
diff --git a/config/config_test.go b/config/config_test.go
index 3c4907a46c..0673748b98 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -15,7 +15,7 @@ package config

 import (
 	"crypto/tls"
-	"encoding/json"
+	"fmt"
 	"net/url"
 	"os"
 	"path/filepath"
@@ -23,10 +23,10 @@ import (
 	"time"

 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"

@@ -50,6 +50,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/ovhcloud"
"github.com/prometheus/prometheus/discovery/puppetdb" "github.com/prometheus/prometheus/discovery/scaleway" + "github.com/prometheus/prometheus/discovery/stackit" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/triton" "github.com/prometheus/prometheus/discovery/uyuni" @@ -69,6 +70,10 @@ func mustParseURL(u string) *config.URL { return &config.URL{URL: parsed} } +func boolPtr(b bool) *bool { + return &b +} + const ( globBodySizeLimit = 15 * units.MiB globSampleLimit = 1500 @@ -77,24 +82,29 @@ const ( globLabelNameLengthLimit = 200 globLabelValueLengthLimit = 200 globalGoGC = 42 + globScrapeFailureLogFile = "testdata/fail.log" ) var expectedConf = &Config{ + loaded: true, GlobalConfig: GlobalConfig{ - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EvaluationInterval: model.Duration(30 * time.Second), - QueryLogFile: "", + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EvaluationInterval: model.Duration(30 * time.Second), + QueryLogFile: "testdata/query.log", + ScrapeFailureLogFile: globScrapeFailureLogFile, ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"), - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + AlwaysScrapeClassicHistograms: false, + ConvertClassicHistogramsToNHCB: false, }, Runtime: RuntimeConfig{ @@ -134,7 +144,7 @@ var expectedConf = &Config{ }, }, FollowRedirects: true, - EnableHTTP2: true, + EnableHTTP2: false, }, }, { @@ -150,18 +160,26 @@ var expectedConf = &Config{ KeyFile: filepath.FromSlash("testdata/valid_key_file"), }, FollowRedirects: true, - EnableHTTP2: true, + EnableHTTP2: false, }, Headers: map[string]string{"name": "value"}, }, }, + OTLPConfig: OTLPConfig{ + PromoteResourceAttributes: []string{ + "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", + }, + TranslationStrategy: UnderscoreEscapingWithSuffixes, + }, + RemoteReadConfigs: []*RemoteReadConfig{ { - URL: mustParseURL("http://remote1/read"), - RemoteTimeout: model.Duration(1 * time.Minute), - ReadRecent: true, - Name: "default", + URL: mustParseURL("http://remote1/read"), + RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, + ReadRecent: true, + Name: "default", HTTPClientConfig: config.HTTPClientConfig{ FollowRedirects: true, EnableHTTP2: false, @@ -171,6 +189,7 @@ var expectedConf = &Config{ { URL: mustParseURL("http://remote3/read"), RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, ReadRecent: false, Name: "read_special", RequiredMatchers: model.LabelSet{"job": "special"}, @@ -190,18 +209,24 @@ var expectedConf = &Config{ { JobName: "prometheus", - HonorLabels: true, - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: 
globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorLabels: true, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFallbackProtocol: PrometheusText0_0_4, + ScrapeFailureLogFile: "testdata/fail_prom.log", + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -216,6 +241,15 @@ var expectedConf = &Config{ TLSConfig: config.TLSConfig{ MinVersion: config.TLSVersion(tls.VersionTLS10), }, + HTTPHeaders: &config.Headers{ + Headers: map[string]config.Header{ + "foo": { + Values: []string{"foobar"}, + Secrets: []config.Secret{"bar", "foo"}, + Files: []string{filepath.FromSlash("testdata/valid_password_file")}, + }, + }, + }, }, ServiceDiscoveryConfigs: discovery.Configs{ @@ -294,17 +328,22 @@ var expectedConf = &Config{ { JobName: "service-x", - HonorTimestamps: true, - ScrapeInterval: model.Duration(50 * time.Second), - ScrapeTimeout: model.Duration(5 * time.Second), - EnableCompression: true, - BodySizeLimit: 10 * units.MiB, - SampleLimit: 1000, - TargetLimit: 35, - LabelLimit: 35, - LabelNameLengthLimit: 210, - LabelValueLengthLimit: 210, - ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}, + HonorTimestamps: true, + ScrapeInterval: model.Duration(50 * time.Second), + ScrapeTimeout: model.Duration(5 * time.Second), + EnableCompression: true, + BodySizeLimit: 10 * units.MiB, + SampleLimit: 1000, + TargetLimit: 35, + LabelLimit: 35, + LabelNameLengthLimit: 210, + LabelValueLengthLimit: 210, + ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.HTTPClientConfig{ BasicAuth: &config.BasicAuth{ @@ -391,17 +430,22 @@ var expectedConf = &Config{ { JobName: "service-y", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + 
LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -446,17 +490,22 @@ var expectedConf = &Config{ { JobName: "service-z", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: model.Duration(10 * time.Second), - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: "/metrics", Scheme: "http", @@ -479,17 +528,22 @@ var expectedConf = &Config{ { JobName: "service-kubernetes", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -518,17 +572,22 @@ var expectedConf = &Config{ { JobName: "service-kubernetes-namespaces", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: 
globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -557,17 +616,22 @@ var expectedConf = &Config{ { JobName: "service-kuma", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -586,17 +650,22 @@ var expectedConf = &Config{ { JobName: "service-marathon", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + 
ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -623,17 +692,22 @@ var expectedConf = &Config{ { JobName: "service-nomad", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -657,17 +731,22 @@ var expectedConf = &Config{ { JobName: "service-ec2", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -698,17 +777,22 @@ var expectedConf = &Config{ { JobName: "service-lightsail", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + 
SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -729,17 +813,22 @@ var expectedConf = &Config{ { JobName: "service-azure", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -763,17 +852,22 @@ var expectedConf = &Config{ { JobName: "service-nerve", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -790,17 +884,22 @@ var expectedConf = &Config{ { JobName: "0123service-xxx", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - 
EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -820,17 +919,22 @@ var expectedConf = &Config{ { JobName: "badfederation", - HonorTimestamps: false, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: false, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: "/federate", Scheme: DefaultScrapeConfig.Scheme, @@ -850,17 +954,22 @@ var expectedConf = &Config{ { JobName: "測試", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, 
+ MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -880,17 +989,22 @@ var expectedConf = &Config{ { JobName: "httpsd", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -907,17 +1021,22 @@ var expectedConf = &Config{ { JobName: "service-triton", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -942,17 +1061,22 @@ var expectedConf = &Config{ { JobName: "digitalocean-droplets", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * 
time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -976,17 +1100,22 @@ var expectedConf = &Config{ { JobName: "docker", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1007,17 +1136,22 @@ var expectedConf = &Config{ { JobName: "dockerswarm", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1037,17 +1171,22 @@ var expectedConf = &Config{ { JobName: "service-openstack", - 
HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1071,17 +1210,22 @@ var expectedConf = &Config{ { JobName: "service-puppetdb", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1107,18 +1251,23 @@ var expectedConf = &Config{ }, }, { - JobName: "hetzner", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + JobName: "hetzner", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: 
globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1164,17 +1313,22 @@ var expectedConf = &Config{ { JobName: "service-eureka", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1191,17 +1345,22 @@ var expectedConf = &Config{ { JobName: "ovhcloud", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1229,17 +1388,22 @@ var expectedConf = &Config{ { JobName: "scaleway", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - 
LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1273,17 +1437,22 @@ var expectedConf = &Config{ { JobName: "linode-instances", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1305,20 +1474,64 @@ var expectedConf = &Config{ }, }, }, + { + JobName: "stackit-servers", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + HTTPClientConfig: config.DefaultHTTPClientConfig, + ServiceDiscoveryConfigs: discovery.Configs{ + &stackit.SDConfig{ + Project: "11111111-1111-1111-1111-111111111111", + Region: "eu01", + HTTPClientConfig: config.HTTPClientConfig{ + Authorization: &config.Authorization{ + 
Type: "Bearer", + Credentials: "abcdef", + }, + FollowRedirects: true, + EnableHTTP2: true, + }, + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + }, { JobName: "uyuni", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1336,18 +1549,24 @@ var expectedConf = &Config{ }, }, { - JobName: "ionos", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + JobName: "ionos", + + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1369,17 +1588,22 @@ var expectedConf = &Config{ { JobName: "vultr", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EnableCompression: true, - BodySizeLimit: globBodySizeLimit, - SampleLimit: globSampleLimit, - TargetLimit: globTargetLimit, - LabelLimit: globLabelLimit, - LabelNameLengthLimit: globLabelNameLengthLimit, - LabelValueLengthLimit: globLabelValueLengthLimit, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EnableCompression: true, + BodySizeLimit: 
globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, + MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme, + MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1444,46 +1668,295 @@ var expectedConf = &Config{ }, } +func TestYAMLNotLongerSupportedAMApi(t *testing.T) { + _, err := LoadFile("testdata/config_with_no_longer_supported_am_api_config.yml", false, promslog.NewNopLogger()) + require.Error(t, err) +} + func TestYAMLRoundtrip(t *testing.T) { - want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/roundtrip.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) - require.NoError(t, err) - got := &Config{} - require.NoError(t, yaml.UnmarshalStrict(out, got)) + + got, err := Load(string(out), promslog.NewNopLogger()) + require.NoError(t, err) require.Equal(t, want, got) } func TestRemoteWriteRetryOnRateLimit(t *testing.T) { - want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) - require.NoError(t, err) - got := &Config{} - require.NoError(t, yaml.UnmarshalStrict(out, got)) + + got, err := Load(string(out), promslog.NewNopLogger()) + require.NoError(t, err) require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit) require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) } +func TestOTLPSanitizeResourceAttributes(t *testing.T) { + t.Run("good config - default resource attributes", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_default_resource_attributes.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.False(t, got.OTLPConfig.PromoteAllResourceAttributes) + require.Empty(t, got.OTLPConfig.IgnoreResourceAttributes) + require.Empty(t, got.OTLPConfig.PromoteResourceAttributes) + }) + + t.Run("good config - promote resource attributes", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_promote_resource_attributes.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.False(t, got.OTLPConfig.PromoteAllResourceAttributes) + require.Empty(t, got.OTLPConfig.IgnoreResourceAttributes) + require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes) + }) + + t.Run("bad config - promote resource attributes", func(t *testing.T) { + _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_promote_resource_attributes.bad.yml"), false, promslog.NewNopLogger()) + require.ErrorContains(t, err, `invalid 
'promote_resource_attributes'`) + require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`) + require.ErrorContains(t, err, `empty promoted OTel resource attribute`) + }) + + t.Run("good config - promote all resource attributes", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes_promote_all.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + require.True(t, got.OTLPConfig.PromoteAllResourceAttributes) + require.Empty(t, got.OTLPConfig.PromoteResourceAttributes) + require.Empty(t, got.OTLPConfig.IgnoreResourceAttributes) + }) + + t.Run("good config - ignore resource attributes", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_ignore_resource_attributes.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + require.True(t, got.OTLPConfig.PromoteAllResourceAttributes) + require.Empty(t, got.OTLPConfig.PromoteResourceAttributes) + require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.IgnoreResourceAttributes) + }) + + t.Run("bad config - ignore resource attributes", func(t *testing.T) { + _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_ignore_resource_attributes.bad.yml"), false, promslog.NewNopLogger()) + require.ErrorContains(t, err, `invalid 'ignore_resource_attributes'`) + require.ErrorContains(t, err, `duplicated ignored OTel resource attribute "k8s.job.name"`) + require.ErrorContains(t, err, `empty ignored OTel resource attribute`) + }) + + t.Run("bad config - conflict between promote all and promote specific resource attributes", func(t *testing.T) { + _, err := LoadFile(filepath.Join("testdata", "otlp_promote_all_resource_attributes.bad.yml"), false, promslog.NewNopLogger()) + require.ErrorContains(t, err, `'promote_all_resource_attributes' and 'promote_resource_attributes' cannot be configured simultaneously`) + }) + + t.Run("bad config - configuring ignoring of resource attributes without also enabling promotion of all resource attributes", func(t *testing.T) { + _, err := LoadFile(filepath.Join("testdata", "otlp_ignore_resource_attributes_without_promote_all.bad.yml"), false, promslog.NewNopLogger()) + require.ErrorContains(t, err, `'ignore_resource_attributes' cannot be configured unless 'promote_all_resource_attributes' is true`) + }) +} + +func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) { + t.Run("good config", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_allow_keep_identifying_resource_attributes.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.True(t, got.OTLPConfig.KeepIdentifyingResourceAttributes) + }) +} + +func TestOTLPConvertHistogramsToNHCB(t *testing.T) { + t.Run("good config", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_convert_histograms_to_nhcb.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + 
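// The convert_histograms_to_nhcb option should survive the YAML marshal/unmarshal round trip. +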
require.True(t, got.OTLPConfig.ConvertHistogramsToNHCB) + }) +} + +func TestOTLPPromoteScopeMetadata(t *testing.T) { + t.Run("good config", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_promote_scope_metadata.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.True(t, got.OTLPConfig.PromoteScopeMetadata) + }) +} + +func TestOTLPAllowUTF8(t *testing.T) { + t.Run("good config - NoUTF8EscapingWithSuffixes", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml") + verify := func(t *testing.T, conf *Config, err error) { + t.Helper() + require.NoError(t, err) + require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy) + } + + t.Run("LoadFile", func(t *testing.T) { + conf, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, conf, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + conf, err := Load(string(content), promslog.NewNopLogger()) + verify(t, conf, err) + }) + }) + + t.Run("incompatible config - NoUTF8EscapingWithSuffixes", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `OTLP translation strategy "NoUTF8EscapingWithSuffixes" is not allowed when UTF8 is disabled`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + t.Log("err", err) + verify(t, err) + }) + }) + + t.Run("good config - NoTranslation", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_no_translation.good.yml") + verify := func(t *testing.T, conf *Config, err error) { + t.Helper() + require.NoError(t, err) + require.Equal(t, NoTranslation, conf.OTLPConfig.TranslationStrategy) + } + + t.Run("LoadFile", func(t *testing.T) { + conf, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, conf, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + conf, err := Load(string(content), promslog.NewNopLogger()) + verify(t, conf, err) + }) + }) + + t.Run("incompatible config - NoTranslation", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_no_translation.incompatible.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `OTLP translation strategy "NoTranslation" is not allowed when UTF8 is disabled`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + t.Log("err", err) + verify(t, err) + }) + }) + + t.Run("bad config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", 
func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + verify(t, err) + }) + }) + + t.Run("good config - missing otlp config uses default", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_empty.yml") + verify := func(t *testing.T, conf *Config, err error) { + t.Helper() + require.NoError(t, err) + require.Equal(t, UnderscoreEscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy) + } + + t.Run("LoadFile", func(t *testing.T) { + conf, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, conf, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + conf, err := Load(string(content), promslog.NewNopLogger()) + verify(t, conf, err) + }) + }) +} + func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. This tests whether parsing // an overwritten default field in the global config permanently changes the default. - _, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/global_timeout.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger()) + require.NoError(t, err) require.Equal(t, expectedConf, c) } func TestScrapeIntervalLarger(t *testing.T) { - c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) require.Len(t, c.ScrapeConfigs, 1) for _, sc := range c.ScrapeConfigs { @@ -1493,7 +1966,7 @@ func TestScrapeIntervalLarger(t *testing.T) { // YAML marshaling must not reveal authentication credentials. 
func TestElideSecrets(t *testing.T) { - c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`) @@ -1503,38 +1976,38 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Len(t, matches, 22, "wrong number of secret matches found") + require.Len(t, matches, 25, "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { // Parse a valid file that sets rule files with an absolute path - c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger()) + c, err := LoadFile(ruleFilesConfigFile, false, promslog.NewNopLogger()) require.NoError(t, err) require.Equal(t, ruleFilesExpectedConf, c) } func TestKubernetesEmptyAPIServer(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) } func TestKubernetesWithKubeConfig(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) } func TestKubernetesSelectors(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) } @@ -1556,11 +2029,7 @@ var expectedErrors = []struct { }, { filename: "labelname.bad.yml", - errMsg: `"not$allowed" is not a valid label name`, - }, - { - filename: "labelname2.bad.yml", - errMsg: `"not:allowed" is not a valid label name`, + errMsg: `"\xff" is not a valid label name`, }, { filename: "labelvalue.bad.yml", @@ -1632,16 +2101,12 @@ var expectedErrors = []struct { }, { filename: "labelmap.bad.yml", - errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action", + errMsg: "!!binary value contains invalid base64 data", }, { filename: "lowercase.bad.yml", errMsg: "relabel configuration for lowercase action requires 'target_label' value", }, - { - filename: "lowercase2.bad.yml", - errMsg: "\"42lab\" is invalid 'target_label' for lowercase action", -
}, { filename: "lowercase3.bad.yml", errMsg: "'replacement' can not be set for lowercase action", @@ -1650,10 +2115,6 @@ var expectedErrors = []struct { filename: "uppercase.bad.yml", errMsg: "relabel configuration for uppercase action requires 'target_label' value", }, - { - filename: "uppercase2.bad.yml", - errMsg: "\"42lab\" is invalid 'target_label' for uppercase action", - }, { filename: "uppercase3.bad.yml", errMsg: "'replacement' can not be set for uppercase action", @@ -1800,7 +2261,7 @@ var expectedErrors = []struct { }, { filename: "remote_write_authorization_header.bad.yml", - errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`, + errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`, }, { filename: "remote_write_wrong_msg.bad.yml", @@ -2004,31 +2465,38 @@ var expectedErrors = []struct { }, { filename: "scrape_config_files_scrape_protocols.bad.yml", - errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`, + errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0] for scrape config with job name "node"`, }, { filename: "scrape_config_files_scrape_protocols2.bad.yml", errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`, }, + { + filename: "scrape_config_files_fallback_scrape_protocol1.bad.yml", + errMsg: `parsing YAML file testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml: invalid fallback_scrape_protocol for scrape config with job name "node": unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0]`, + }, + { + filename: "scrape_config_files_fallback_scrape_protocol2.bad.yml", + errMsg: `unmarshal errors`, + }, + { + filename: "scrape_config_utf8_conflicting.bad.yml", + errMsg: `utf8 metric names requested but validation scheme is not set to UTF8`, + }, + { + filename: "stackit_endpoint.bad.yml", + errMsg: "invalid endpoint", + }, } func TestBadConfigs(t *testing.T) { for _, ee := range expectedErrors { - _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) - require.Error(t, err, "%s", ee.filename) - require.Contains(t, err.Error(), ee.errMsg, + _, err := LoadFile("testdata/"+ee.filename, false, promslog.NewNopLogger()) + require.ErrorContains(t, err, ee.errMsg, "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) } } -func TestBadStaticConfigsJSON(t *testing.T) { - content, err := os.ReadFile("testdata/static_config.bad.json") - require.NoError(t, err) - var tg targetgroup.Group - err = json.Unmarshal(content, &tg) - require.Error(t, err) -} - func TestBadStaticConfigsYML(t *testing.T) { content, err := os.ReadFile("testdata/static_config.bad.yml") require.NoError(t, err) @@ -2038,48 +2506,46 @@ func TestBadStaticConfigsYML(t *testing.T) { } func TestEmptyConfig(t *testing.T) { - c, err := Load("", 
false, log.NewNopLogger()) + c, err := Load("", promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig + exp.loaded = true require.Equal(t, exp, *c) + require.Equal(t, 75, c.Runtime.GoGC) } func TestExpandExternalLabels(t *testing.T) { // Clean up any TEST env variable that could exist on the system. os.Setenv("TEST", "") - c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger()) - require.NoError(t, err) - testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels) - - c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) + c, err := LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) os.Setenv("TEST", "TestValue") - c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) + c, err = LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) } func TestAgentMode(t *testing.T) { - _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger()) + _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field alerting is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field alerting is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field rule_files is not allowed in agent mode") - _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger()) + _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, promslog.NewNopLogger()) require.ErrorContains(t, err, "field remote_read is not allowed in agent mode") - c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger()) + c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, promslog.NewNopLogger()) require.NoError(t, err) require.Empty(t, c.RemoteWriteConfigs) - c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger()) + c, err = LoadFile("testdata/agent_mode.good.yml", true, promslog.NewNopLogger()) require.NoError(t, err) require.Len(t, c.RemoteWriteConfigs, 1) require.Equal( @@ -2090,21 +2556,33 @@ func TestAgentMode(t *testing.T) { } func TestEmptyGlobalBlock(t *testing.T) { - c, err := Load("global:\n", false, log.NewNopLogger()) + c, err := Load("global:\n", promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig - exp.Runtime = DefaultRuntimeConfig + exp.loaded = true require.Equal(t, exp, *c) } +// ScrapeConfigOptions contains options for creating a scrape config.
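+// The histogram fields mirror the always_scrape_classic_histograms and convert_classic_histograms_to_nhcb scrape_config settings exercised below.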
+type ScrapeConfigOptions struct { + JobName string + ScrapeInterval model.Duration + ScrapeTimeout model.Duration + AlwaysScrapeClassicHistograms bool + ConvertClassicHistToNHCB bool +} + func TestGetScrapeConfigs(t *testing.T) { - sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration) *ScrapeConfig { + // Helper function to create a scrape config with the given options. + sc := func(opts ScrapeConfigOptions) *ScrapeConfig { return &ScrapeConfig{ - JobName: jobName, - HonorTimestamps: true, - ScrapeInterval: scrapeInterval, - ScrapeTimeout: scrapeTimeout, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + JobName: opts.JobName, + HonorTimestamps: true, + ScrapeInterval: opts.ScrapeInterval, + ScrapeTimeout: opts.ScrapeTimeout, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + MetricNameValidationScheme: model.UTF8Validation, + MetricNameEscapingScheme: model.AllowUTF8, MetricsPath: "/metrics", Scheme: "http", @@ -2122,6 +2600,8 @@ func TestGetScrapeConfigs(t *testing.T) { }, }, }, + AlwaysScrapeClassicHistograms: boolPtr(opts.AlwaysScrapeClassicHistograms), + ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB), } } @@ -2134,33 +2614,37 @@ func TestGetScrapeConfigs(t *testing.T) { { name: "An included config file should be a valid global config.", configFile: "testdata/scrape_config_files.good.yml", - expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))}, + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, }, { - name: "An global config that only include a scrape config file.", + name: "A global config that only include a scrape config file.", configFile: "testdata/scrape_config_files_only.good.yml", - expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))}, + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, }, { - name: "An global config that combine scrape config files and scrape configs.", + name: "A global config that combine scrape config files and scrape configs.", configFile: "testdata/scrape_config_files_combined.good.yml", expectedResult: []*ScrapeConfig{ - sc("node", model.Duration(60*time.Second), model.Duration(10*time.Second)), - sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second)), - sc("alertmanager", model.Duration(60*time.Second), model.Duration(10*time.Second)), + sc(ScrapeConfigOptions{JobName: "node", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}), + sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}), + sc(ScrapeConfigOptions{JobName: "alertmanager", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}), }, }, { - name: "An global config that includes a scrape config file with 
globs", + name: "A global config that includes a scrape config file with globs", configFile: "testdata/scrape_config_files_glob.good.yml", expectedResult: []*ScrapeConfig{ { JobName: "prometheus", - HonorTimestamps: true, - ScrapeInterval: model.Duration(60 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(60 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + MetricNameValidationScheme: model.UTF8Validation, + MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -2190,10 +2674,14 @@ func TestGetScrapeConfigs(t *testing.T) { { JobName: "node", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + MetricNameValidationScheme: model.UTF8Validation, + MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), + ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.HTTPClientConfig{ TLSConfig: config.TLSConfig{ @@ -2227,25 +2715,60 @@ func TestGetScrapeConfigs(t *testing.T) { }, }, { - name: "An global config that includes twice the same scrape configs.", + name: "A global config that includes twice the same scrape configs.", configFile: "testdata/scrape_config_files_double_import.bad.yml", expectedError: `found multiple scrape configs with job name "prometheus"`, }, { - name: "An global config that includes a scrape config identical to a scrape config in the main file.", + name: "A global config that includes a scrape config identical to a scrape config in the main file.", configFile: "testdata/scrape_config_files_duplicate.bad.yml", expectedError: `found multiple scrape configs with job name "prometheus"`, }, { - name: "An global config that includes a scrape config file with errors.", + name: "A global config that includes a scrape config file with errors.", configFile: "testdata/scrape_config_files_global.bad.yml", expectedError: `scrape timeout greater than scrape interval for scrape config with job name "prometheus"`, }, + { + name: "A global config that enables convert classic histograms to nhcb.", + configFile: "testdata/global_convert_classic_hist_to_nhcb.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})}, + }, + { + name: "A global config that enables convert classic histograms to nhcb and scrape config that disables the conversion", + configFile: "testdata/local_disable_convert_classic_hist_to_nhcb.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, + }, + { + name: "A global config that disables convert classic histograms to nhcb 
and scrape config that enables the conversion", + configFile: "testdata/local_convert_classic_hist_to_nhcb.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})}, + }, + { + name: "A global config that enables always scrape classic histograms", + configFile: "testdata/global_enable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})}, + }, + { + name: "A global config that disables always scrape classic histograms", + configFile: "testdata/global_disable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, + }, + { + name: "A global config that disables always scrape classic histograms and scrape config that enables it", + configFile: "testdata/local_enable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})}, + }, + { + name: "A global config that enables always scrape classic histograms and scrape config that disables it", + configFile: "testdata/local_disable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger()) + c, err := LoadFile(tc.configFile, false, promslog.NewNopLogger()) require.NoError(t, err) scfgs, err := c.GetScrapeConfigs() @@ -2263,7 +2786,7 @@ func kubernetesSDHostURL() config.URL { } func TestScrapeConfigDisableCompression(t *testing.T) { - want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger()) + want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -2274,3 +2797,163 @@ func TestScrapeConfigDisableCompression(t *testing.T) { require.False(t, got.ScrapeConfigs[0].EnableCompression) } + +func TestScrapeConfigNameValidationSettings(t *testing.T) { + tests := []struct { + name string + inputFile string + expectScheme model.ValidationScheme + }{ + { + name: "blank config implies default", + inputFile: "scrape_config_default_validation_mode", + expectScheme: model.UTF8Validation, + }, + { + name: "global setting implies local settings", + inputFile: "scrape_config_global_validation_mode", + expectScheme: model.LegacyValidation, + }, + { + name: "local setting", + inputFile: "scrape_config_local_validation_mode", + expectScheme: model.LegacyValidation, + }, + { + name: "local setting overrides global setting", + inputFile: 
"scrape_config_local_global_validation_mode", + expectScheme: model.UTF8Validation, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + + require.NoError(t, err) + got := &Config{} + require.NoError(t, yaml.UnmarshalStrict(out, got)) + + require.Equal(t, tc.expectScheme, got.ScrapeConfigs[0].MetricNameValidationScheme) + }) + } +} + +func TestScrapeConfigNameEscapingSettings(t *testing.T) { + tests := []struct { + name string + inputFile string + expectValidationScheme model.ValidationScheme + expectEscapingScheme string + }{ + { + name: "blank config implies default", + inputFile: "scrape_config_default_validation_mode", + expectValidationScheme: model.UTF8Validation, + expectEscapingScheme: "allow-utf-8", + }, + { + name: "global setting implies local settings", + inputFile: "scrape_config_global_validation_mode", + expectValidationScheme: model.LegacyValidation, + expectEscapingScheme: "dots", + }, + { + name: "local setting", + inputFile: "scrape_config_local_validation_mode", + expectValidationScheme: model.LegacyValidation, + expectEscapingScheme: "values", + }, + { + name: "local setting overrides global setting", + inputFile: "scrape_config_local_global_validation_mode", + expectValidationScheme: model.UTF8Validation, + expectEscapingScheme: "dots", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + + require.NoError(t, err) + got := &Config{} + require.NoError(t, yaml.UnmarshalStrict(out, got)) + + require.Equal(t, tc.expectValidationScheme, got.ScrapeConfigs[0].MetricNameValidationScheme) + require.Equal(t, tc.expectEscapingScheme, got.ScrapeConfigs[0].MetricNameEscapingScheme) + }) + } +} + +func TestScrapeProtocolHeader(t *testing.T) { + tests := []struct { + name string + proto ScrapeProtocol + expectedValue string + }{ + { + name: "blank", + proto: ScrapeProtocol(""), + expectedValue: "", + }, + { + name: "invalid", + proto: ScrapeProtocol("invalid"), + expectedValue: "", + }, + { + name: "prometheus protobuf", + proto: PrometheusProto, + expectedValue: "application/vnd.google.protobuf", + }, + { + name: "prometheus text 0.0.4", + proto: PrometheusText0_0_4, + expectedValue: "text/plain", + }, + { + name: "prometheus text 1.0.0", + proto: PrometheusText1_0_0, + expectedValue: "text/plain", + }, + { + name: "openmetrics 0.0.1", + proto: OpenMetricsText0_0_1, + expectedValue: "application/openmetrics-text", + }, + { + name: "openmetrics 1.0.0", + proto: OpenMetricsText1_0_0, + expectedValue: "application/openmetrics-text", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mediaType := tc.proto.HeaderMediaType() + + require.Equal(t, tc.expectedValue, mediaType) + }) + } +} + +// Regression test against https://github.com/prometheus/prometheus/issues/15538 +func TestGetScrapeConfigs_Loaded(t *testing.T) { + t.Run("without load", func(t *testing.T) { + c := &Config{} + _, err := c.GetScrapeConfigs() + require.EqualError(t, err, "scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen") + }) + t.Run("with load", func(t *testing.T) { + c, err := Load("", promslog.NewNopLogger()) + require.NoError(t, err) + _, err = 
c.GetScrapeConfigs() + require.NoError(t, err) + }) +} diff --git a/config/config_windows_test.go b/config/config_windows_test.go index db4d46ef13..9d338b99e7 100644 --- a/config/config_windows_test.go +++ b/config/config_windows_test.go @@ -16,6 +16,8 @@ package config const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml" var ruleFilesExpectedConf = &Config{ + loaded: true, + GlobalConfig: DefaultGlobalConfig, Runtime: DefaultRuntimeConfig, RuleFiles: []string{ diff --git a/config/reload.go b/config/reload.go new file mode 100644 index 0000000000..cc0cc97158 --- /dev/null +++ b/config/reload.go @@ -0,0 +1,93 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "path/filepath" + + promconfig "github.com/prometheus/common/config" + "gopkg.in/yaml.v2" +) + +type ExternalFilesConfig struct { + RuleFiles []string `yaml:"rule_files"` + ScrapeConfigFiles []string `yaml:"scrape_config_files"` +} + +// GenerateChecksum generates a checksum of the YAML file and the files it references. +func GenerateChecksum(yamlFilePath string) (string, error) { + hash := sha256.New() + + yamlContent, err := os.ReadFile(yamlFilePath) + if err != nil { + return "", fmt.Errorf("error reading YAML file: %w", err) + } + _, err = hash.Write(yamlContent) + if err != nil { + return "", fmt.Errorf("error writing YAML file to hash: %w", err) + } + + var config ExternalFilesConfig + if err := yaml.Unmarshal(yamlContent, &config); err != nil { + return "", fmt.Errorf("error unmarshalling YAML: %w", err) + } + + dir := filepath.Dir(yamlFilePath) + + for i, file := range config.RuleFiles { + config.RuleFiles[i] = promconfig.JoinDir(dir, file) + } + for i, file := range config.ScrapeConfigFiles { + config.ScrapeConfigFiles[i] = promconfig.JoinDir(dir, file) + } + + files := map[string][]string{ + "r": config.RuleFiles, // "r" for rule files + "s": config.ScrapeConfigFiles, // "s" for scrape config files + } + + for _, prefix := range []string{"r", "s"} { + for _, pattern := range files[prefix] { + matchingFiles, err := filepath.Glob(pattern) + if err != nil { + return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err) + } + + for _, file := range matchingFiles { + // Write prefix to the hash ("r" or "s") followed by \0, then + // the file path. + _, err = hash.Write([]byte(prefix + "\x00" + file + "\x00")) + if err != nil { + return "", fmt.Errorf("error writing %q path to hash: %w", file, err) + } + + // Read and hash the content of the file. 
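+ // A trailing NUL byte is appended after each file's content so that shifting bytes between adjacent files cannot produce the same checksum.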
+ content, err := os.ReadFile(file) + if err != nil { + return "", fmt.Errorf("error reading file %s: %w", file, err) + } + _, err = hash.Write(append(content, []byte("\x00")...)) + if err != nil { + return "", fmt.Errorf("error writing %q content to hash: %w", file, err) + } + } + } + } + + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/config/reload_test.go b/config/reload_test.go new file mode 100644 index 0000000000..3e77260ab3 --- /dev/null +++ b/config/reload_test.go @@ -0,0 +1,246 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGenerateChecksum(t *testing.T) { + tmpDir := t.TempDir() + + // Define paths for the temporary files. + yamlFilePath := filepath.Join(tmpDir, "test.yml") + ruleFile := "rule_file.yml" + ruleFilePath := filepath.Join(tmpDir, ruleFile) + scrapeConfigFile := "scrape_config.yml" + scrapeConfigFilePath := filepath.Join(tmpDir, scrapeConfigFile) + + // Define initial and modified content for the files. + originalRuleContent := "groups:\n- name: example\n rules:\n - alert: ExampleAlert" + modifiedRuleContent := "groups:\n- name: example\n rules:\n - alert: ModifiedAlert" + + originalScrapeConfigContent := "scrape_configs:\n- job_name: example" + modifiedScrapeConfigContent := "scrape_configs:\n- job_name: modified_example" + + testCases := []struct { + name string + ruleFilePath string + scrapeConfigFilePath string + }{ + { + name: "Auto reload using relative path.", + ruleFilePath: ruleFile, + scrapeConfigFilePath: scrapeConfigFile, + }, + { + name: "Auto reload using absolute path.", + ruleFilePath: ruleFilePath, + scrapeConfigFilePath: scrapeConfigFilePath, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Define YAML content referencing the rule and scrape config files. + yamlContent := fmt.Sprintf(` +rule_files: + - %s +scrape_config_files: + - %s +`, tc.ruleFilePath, tc.scrapeConfigFilePath) + + // Write initial content to files. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Generate the original checksum. + originalChecksum := calculateChecksum(t, yamlFilePath) + + t.Run("Rule File Change", func(t *testing.T) { + // Modify the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(modifiedRuleContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. 
+ revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Change", func(t *testing.T) { + // Modify the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(modifiedScrapeConfigContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Rule File Deletion", func(t *testing.T) { + // Delete the rule file. + require.NoError(t, os.Remove(ruleFilePath)) + + // Checksum should change. + deletedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, deletedChecksum) + + // Restore the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Deletion", func(t *testing.T) { + // Delete the scrape config file. + require.NoError(t, os.Remove(scrapeConfigFilePath)) + + // Checksum should change. + deletedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, deletedChecksum) + + // Restore the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Main File Change", func(t *testing.T) { + // Modify the main YAML file. + modifiedYamlContent := fmt.Sprintf(` +global: + scrape_interval: 3s +rule_files: + - %s +scrape_config_files: + - %s +`, tc.ruleFilePath, tc.scrapeConfigFilePath) + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the main YAML file. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Rule File Removed from YAML Config", func(t *testing.T) { + // Modify the YAML content to remove the rule file. + modifiedYamlContent := fmt.Sprintf(` +scrape_config_files: + - %s +`, tc.scrapeConfigFilePath) + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the YAML content. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Scrape Config Removed from YAML Config", func(t *testing.T) { + // Modify the YAML content to remove the scrape config file. 
+ modifiedYamlContent := fmt.Sprintf(` +rule_files: + - %s +`, tc.ruleFilePath) + require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644)) + + // Checksum should change. + modifiedChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, modifiedChecksum) + + // Revert the YAML content. + require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Empty Rule File", func(t *testing.T) { + // Write an empty rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(""), 0o644)) + + // Checksum should change. + emptyChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, emptyChecksum) + + // Restore the rule file. + require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + + t.Run("Empty Scrape Config File", func(t *testing.T) { + // Write an empty scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(""), 0o644)) + + // Checksum should change. + emptyChecksum := calculateChecksum(t, yamlFilePath) + require.NotEqual(t, originalChecksum, emptyChecksum) + + // Restore the scrape config file. + require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644)) + + // Checksum should return to the original. + revertedChecksum := calculateChecksum(t, yamlFilePath) + require.Equal(t, originalChecksum, revertedChecksum) + }) + }) + } +} + +// calculateChecksum generates a checksum for the given YAML file path. +func calculateChecksum(t *testing.T, yamlFilePath string) string { + checksum, err := GenerateChecksum(yamlFilePath) + require.NoError(t, err) + require.NotEmpty(t, checksum) + return checksum +} diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 0e0aa2bd5d..cbe80404bf 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -8,6 +8,8 @@ global: label_limit: 30 label_name_length_limit: 200 label_value_length_limit: 200 + query_log_file: query.log + scrape_failure_log_file: fail.log # scrape_timeout is set to the global default (10s). external_labels: @@ -45,6 +47,9 @@ remote_write: headers: name: value +otlp: + promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"] + remote_read: - url: http://remote1/read read_recent: true @@ -69,6 +74,9 @@ scrape_configs: # metrics_path defaults to '/metrics' # scheme defaults to 'http'. 
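The GenerateChecksum helper added above in config/reload.go hashes the main YAML file together with every rule and scrape-config file it references; the "r"/"s" prefixes and NUL separators keep the digest unambiguous when content moves between files. A minimal sketch of how a caller might poll it to drive automatic reloads; the file name, interval, and reload callback here are illustrative assumptions, not part of this patch:

package main

import (
	"log"
	"time"

	"github.com/prometheus/prometheus/config"
)

// watchConfig recomputes the checksum on a fixed interval and invokes reload
// whenever the digest changes (hypothetical helper for illustration only).
func watchConfig(path string, interval time.Duration, reload func() error) {
	// An initial failure leaves last empty, forcing a reload attempt on the
	// first successful read.
	last, _ := config.GenerateChecksum(path)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		sum, err := config.GenerateChecksum(path)
		if err != nil {
			log.Printf("checksum failed: %v", err)
			continue // keep the last good digest and retry next tick
		}
		if sum == last {
			continue
		}
		if err := reload(); err != nil {
			log.Printf("reload failed: %v", err)
			continue // digest stays stale, so the reload is retried
		}
		last = sum
	}
}

func main() {
	watchConfig("prometheus.yml", 30*time.Second, func() error {
		log.Println("configuration changed; would reload here")
		return nil
	})
}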
+ fallback_scrape_protocol: PrometheusText0.0.4 + + scrape_failure_log_file: fail_prom.log file_sd_configs: - files: - foo/*.slow.json @@ -84,6 +92,12 @@ scrape_configs: my: label your: label + http_headers: + foo: + values: ["foobar"] + secrets: ["bar", "foo"] + files: ["valid_password_file"] + relabel_configs: - source_labels: [job, __meta_dns_name] regex: (.*)some-[regex] @@ -403,6 +417,12 @@ scrape_configs: - authorization: credentials: abcdef + - job_name: stackit-servers + stackit_sd_configs: + - project: 11111111-1111-1111-1111-111111111111 + authorization: + credentials: abcdef + - job_name: uyuni uyuni_sd_configs: - server: https://localhost:1234 diff --git a/config/testdata/config_with_deprecated_am_api_config.yml b/config/testdata/config_with_deprecated_am_api_config.yml new file mode 100644 index 0000000000..ac89537ff1 --- /dev/null +++ b/config/testdata/config_with_deprecated_am_api_config.yml @@ -0,0 +1,7 @@ +alerting: + alertmanagers: + - scheme: http + api_version: v1 + file_sd_configs: + - files: + - nonexistent_file.yml diff --git a/config/testdata/global_convert_classic_hist_to_nhcb.good.yml b/config/testdata/global_convert_classic_hist_to_nhcb.good.yml new file mode 100644 index 0000000000..c97b7597af --- /dev/null +++ b/config/testdata/global_convert_classic_hist_to_nhcb.good.yml @@ -0,0 +1,6 @@ +global: + convert_classic_histograms_to_nhcb: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/global_disable_always_scrape_classic_hist.good.yml b/config/testdata/global_disable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..de28f1357a --- /dev/null +++ b/config/testdata/global_disable_always_scrape_classic_hist.good.yml @@ -0,0 +1,6 @@ +global: + always_scrape_classic_histograms: false +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/global_enable_always_scrape_classic_hist.good.yml b/config/testdata/global_enable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..d42cf69cb6 --- /dev/null +++ b/config/testdata/global_enable_always_scrape_classic_hist.good.yml @@ -0,0 +1,6 @@ +global: + always_scrape_classic_histograms: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/jobname_dup.bad.yml b/config/testdata/jobname_dup.bad.yml index 0265493c30..d03cb0cf97 100644 --- a/config/testdata/jobname_dup.bad.yml +++ b/config/testdata/jobname_dup.bad.yml @@ -1,4 +1,6 @@ # Two scrape configs with the same job names are not allowed. 
+global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus - job_name: service-x diff --git a/config/testdata/labelmap.bad.yml b/config/testdata/labelmap.bad.yml index 29d2653990..b8aa117acf 100644 --- a/config/testdata/labelmap.bad.yml +++ b/config/testdata/labelmap.bad.yml @@ -2,4 +2,4 @@ scrape_configs: - job_name: prometheus relabel_configs: - action: labelmap - replacement: l-$1 + replacement: !!binary "/w==$1" diff --git a/config/testdata/labelname.bad.yml b/config/testdata/labelname.bad.yml index c06853a26b..0c9c5ef3b2 100644 --- a/config/testdata/labelname.bad.yml +++ b/config/testdata/labelname.bad.yml @@ -1,3 +1,3 @@ global: external_labels: - not$allowed: value + !!binary "/w==": value diff --git a/config/testdata/labelname2.bad.yml b/config/testdata/labelname2.bad.yml deleted file mode 100644 index 7afcd6bcfc..0000000000 --- a/config/testdata/labelname2.bad.yml +++ /dev/null @@ -1,3 +0,0 @@ -global: - external_labels: - 'not:allowed': value diff --git a/config/testdata/local_convert_classic_hist_to_nhcb.good.yml b/config/testdata/local_convert_classic_hist_to_nhcb.good.yml new file mode 100644 index 0000000000..0fd31727eb --- /dev/null +++ b/config/testdata/local_convert_classic_hist_to_nhcb.good.yml @@ -0,0 +1,7 @@ +global: + convert_classic_histograms_to_nhcb: false +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + convert_classic_histograms_to_nhcb: true diff --git a/config/testdata/local_disable_always_scrape_classic_hist.good.yml b/config/testdata/local_disable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..9e668340dc --- /dev/null +++ b/config/testdata/local_disable_always_scrape_classic_hist.good.yml @@ -0,0 +1,7 @@ +global: + always_scrape_classic_histograms: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + always_scrape_classic_histograms: false diff --git a/config/testdata/local_disable_convert_classic_hist_to_nhcb.good.yml b/config/testdata/local_disable_convert_classic_hist_to_nhcb.good.yml new file mode 100644 index 0000000000..b41af7e0a5 --- /dev/null +++ b/config/testdata/local_disable_convert_classic_hist_to_nhcb.good.yml @@ -0,0 +1,7 @@ +global: + convert_classic_histograms_to_nhcb: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + convert_classic_histograms_to_nhcb: false diff --git a/config/testdata/local_enable_always_scrape_classic_hist.good.yml b/config/testdata/local_enable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..165be07754 --- /dev/null +++ b/config/testdata/local_enable_always_scrape_classic_hist.good.yml @@ -0,0 +1,7 @@ +global: + always_scrape_classic_histograms: false +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + always_scrape_classic_histograms: true diff --git a/config/testdata/lowercase.bad.yml b/config/testdata/lowercase.bad.yml index 9bc9583341..6dd72e6476 100644 --- a/config/testdata/lowercase.bad.yml +++ b/config/testdata/lowercase.bad.yml @@ -1,3 +1,5 @@ +global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus relabel_configs: diff --git a/config/testdata/lowercase2.bad.yml b/config/testdata/lowercase2.bad.yml deleted file mode 100644 index bde8862c66..0000000000 --- a/config/testdata/lowercase2.bad.yml +++ /dev/null @@ -1,6 +0,0 @@ -scrape_configs: - - job_name: prometheus - relabel_configs: - - action: lowercase - source_labels: 
[__name__] - target_label: 42lab diff --git a/config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml b/config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml new file mode 100644 index 0000000000..63151e2a77 --- /dev/null +++ b/config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml @@ -0,0 +1,2 @@ +otlp: + keep_identifying_resource_attributes: true diff --git a/config/testdata/otlp_allow_utf8.bad.yml b/config/testdata/otlp_allow_utf8.bad.yml new file mode 100644 index 0000000000..488e4b0558 --- /dev/null +++ b/config/testdata/otlp_allow_utf8.bad.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: Invalid diff --git a/config/testdata/otlp_allow_utf8.good.yml b/config/testdata/otlp_allow_utf8.good.yml new file mode 100644 index 0000000000..f3069d2fdd --- /dev/null +++ b/config/testdata/otlp_allow_utf8.good.yml @@ -0,0 +1,2 @@ +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/config/testdata/otlp_allow_utf8.incompatible.yml b/config/testdata/otlp_allow_utf8.incompatible.yml new file mode 100644 index 0000000000..2625c24131 --- /dev/null +++ b/config/testdata/otlp_allow_utf8.incompatible.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/config/testdata/otlp_convert_histograms_to_nhcb.good.yml b/config/testdata/otlp_convert_histograms_to_nhcb.good.yml new file mode 100644 index 0000000000..1462cafe9b --- /dev/null +++ b/config/testdata/otlp_convert_histograms_to_nhcb.good.yml @@ -0,0 +1,2 @@ +otlp: + convert_histograms_to_nhcb: true diff --git a/config/testdata/otlp_empty.yml b/config/testdata/otlp_empty.yml new file mode 100644 index 0000000000..7085e9246b --- /dev/null +++ b/config/testdata/otlp_empty.yml @@ -0,0 +1 @@ +global: diff --git a/config/testdata/otlp_ignore_resource_attributes_without_promote_all.bad.yml b/config/testdata/otlp_ignore_resource_attributes_without_promote_all.bad.yml new file mode 100644 index 0000000000..be4ee60f2a --- /dev/null +++ b/config/testdata/otlp_ignore_resource_attributes_without_promote_all.bad.yml @@ -0,0 +1,2 @@ +otlp: + ignore_resource_attributes: ["k8s.job.name"] diff --git a/config/testdata/otlp_no_translation.good.yml b/config/testdata/otlp_no_translation.good.yml new file mode 100644 index 0000000000..e5c4460842 --- /dev/null +++ b/config/testdata/otlp_no_translation.good.yml @@ -0,0 +1,2 @@ +otlp: + translation_strategy: NoTranslation diff --git a/config/testdata/otlp_no_translation.incompatible.yml b/config/testdata/otlp_no_translation.incompatible.yml new file mode 100644 index 0000000000..33c5a756f5 --- /dev/null +++ b/config/testdata/otlp_no_translation.incompatible.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: NoTranslation diff --git a/config/testdata/otlp_promote_all_resource_attributes.bad.yml b/config/testdata/otlp_promote_all_resource_attributes.bad.yml new file mode 100644 index 0000000000..2be54ec155 --- /dev/null +++ b/config/testdata/otlp_promote_all_resource_attributes.bad.yml @@ -0,0 +1,3 @@ +otlp: + promote_all_resource_attributes: true + promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name"] diff --git a/config/testdata/otlp_promote_scope_metadata.good.yml b/config/testdata/otlp_promote_scope_metadata.good.yml new file mode 100644 index 0000000000..d88f657abd --- /dev/null +++ 
b/config/testdata/otlp_promote_scope_metadata.good.yml @@ -0,0 +1,2 @@ +otlp: + promote_scope_metadata: true diff --git a/config/testdata/otlp_sanitize_default_resource_attributes.good.yml b/config/testdata/otlp_sanitize_default_resource_attributes.good.yml new file mode 100644 index 0000000000..abdd98dc7a --- /dev/null +++ b/config/testdata/otlp_sanitize_default_resource_attributes.good.yml @@ -0,0 +1 @@ +otlp: diff --git a/config/testdata/otlp_sanitize_ignore_resource_attributes.bad.yml b/config/testdata/otlp_sanitize_ignore_resource_attributes.bad.yml new file mode 100644 index 0000000000..57ce0efac0 --- /dev/null +++ b/config/testdata/otlp_sanitize_ignore_resource_attributes.bad.yml @@ -0,0 +1,3 @@ +otlp: + promote_all_resource_attributes: true + ignore_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""] diff --git a/config/testdata/otlp_sanitize_ignore_resource_attributes.good.yml b/config/testdata/otlp_sanitize_ignore_resource_attributes.good.yml new file mode 100644 index 0000000000..7a50fef405 --- /dev/null +++ b/config/testdata/otlp_sanitize_ignore_resource_attributes.good.yml @@ -0,0 +1,3 @@ +otlp: + promote_all_resource_attributes: true + ignore_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"] diff --git a/config/testdata/otlp_sanitize_promote_resource_attributes.bad.yml b/config/testdata/otlp_sanitize_promote_resource_attributes.bad.yml new file mode 100644 index 0000000000..37ec5d1209 --- /dev/null +++ b/config/testdata/otlp_sanitize_promote_resource_attributes.bad.yml @@ -0,0 +1,2 @@ +otlp: + promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""] diff --git a/config/testdata/otlp_sanitize_promote_resource_attributes.good.yml b/config/testdata/otlp_sanitize_promote_resource_attributes.good.yml new file mode 100644 index 0000000000..67247e7743 --- /dev/null +++ b/config/testdata/otlp_sanitize_promote_resource_attributes.good.yml @@ -0,0 +1,2 @@ +otlp: + promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"] diff --git a/config/testdata/otlp_sanitize_resource_attributes_promote_all.good.yml b/config/testdata/otlp_sanitize_resource_attributes_promote_all.good.yml new file mode 100644 index 0000000000..2c8360011b --- /dev/null +++ b/config/testdata/otlp_sanitize_resource_attributes_promote_all.good.yml @@ -0,0 +1,2 @@ +otlp: + promote_all_resource_attributes: true diff --git a/config/testdata/scrape_config_default_validation_mode.yml b/config/testdata/scrape_config_default_validation_mode.yml new file mode 100644 index 0000000000..96680d6438 --- /dev/null +++ b/config/testdata/scrape_config_default_validation_mode.yml @@ -0,0 +1,2 @@ +scrape_configs: + - job_name: prometheus diff --git a/config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml b/config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml new file mode 100644 index 0000000000..07cfe47594 --- /dev/null +++ b/config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: node + fallback_scrape_protocol: "prometheusproto" + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml b/config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml new file mode 100644 index 0000000000..c5d133f9c4 --- /dev/null +++ b/config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml @@ -0,0 +1,5 
@@ +scrape_configs: + - job_name: node + fallback_scrape_protocol: ["OpenMetricsText1.0.0", "PrometheusText0.0.4"] + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_global_validation_mode.yml b/config/testdata/scrape_config_global_validation_mode.yml new file mode 100644 index 0000000000..fb4baf7b07 --- /dev/null +++ b/config/testdata/scrape_config_global_validation_mode.yml @@ -0,0 +1,5 @@ +global: + metric_name_validation_scheme: legacy + metric_name_escaping_scheme: dots +scrape_configs: + - job_name: prometheus diff --git a/config/testdata/scrape_config_local_global_validation_mode.yml b/config/testdata/scrape_config_local_global_validation_mode.yml new file mode 100644 index 0000000000..29cd2b4140 --- /dev/null +++ b/config/testdata/scrape_config_local_global_validation_mode.yml @@ -0,0 +1,7 @@ +global: + metric_name_validation_scheme: legacy + metric_name_escaping_scheme: values +scrape_configs: + - job_name: prometheus + metric_name_validation_scheme: utf8 + metric_name_escaping_scheme: dots diff --git a/config/testdata/scrape_config_local_validation_mode.yml b/config/testdata/scrape_config_local_validation_mode.yml new file mode 100644 index 0000000000..b4d1ff05df --- /dev/null +++ b/config/testdata/scrape_config_local_validation_mode.yml @@ -0,0 +1,4 @@ +scrape_configs: + - job_name: prometheus + metric_name_validation_scheme: legacy + metric_name_escaping_scheme: values diff --git a/config/testdata/scrape_config_utf8_conflicting.bad.yml b/config/testdata/scrape_config_utf8_conflicting.bad.yml new file mode 100644 index 0000000000..3f1b8f87ac --- /dev/null +++ b/config/testdata/scrape_config_utf8_conflicting.bad.yml @@ -0,0 +1,5 @@ +global: + metric_name_validation_scheme: legacy + metric_name_escaping_scheme: allow-utf-8 +scrape_configs: + - job_name: prometheus diff --git a/config/testdata/stackit_endpoint.bad.yml b/config/testdata/stackit_endpoint.bad.yml new file mode 100644 index 0000000000..ecb0fefe9e --- /dev/null +++ b/config/testdata/stackit_endpoint.bad.yml @@ -0,0 +1,4 @@ +scrape_configs: + - job_name: stackit + stackit_sd_configs: + - endpoint: "://invalid" diff --git a/config/testdata/static_config.bad.json b/config/testdata/static_config.bad.json deleted file mode 100644 index 6050ed9c50..0000000000 --- a/config/testdata/static_config.bad.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "targets": ["1.2.3.4:9100"], - "labels": { - "some_valid_label": "foo", - "oops:this-label-is-invalid": "bar" - } -} diff --git a/config/testdata/static_config.bad.yml b/config/testdata/static_config.bad.yml index 1d229ec5e7..7e9003dcbf 100644 --- a/config/testdata/static_config.bad.yml +++ b/config/testdata/static_config.bad.yml @@ -1,4 +1,4 @@ targets: ['1.2.3.4:9001', '1.2.3.5:9090'] labels: valid_label: foo - not:valid_label: bar + !!binary "/w==": bar diff --git a/config/testdata/uppercase2.bad.yml b/config/testdata/uppercase2.bad.yml deleted file mode 100644 index 330b9aceb6..0000000000 --- a/config/testdata/uppercase2.bad.yml +++ /dev/null @@ -1,6 +0,0 @@ -scrape_configs: - - job_name: prometheus - relabel_configs: - - action: uppercase - source_labels: [__name__] - target_label: 42lab diff --git a/console_libraries/menu.lib b/console_libraries/menu.lib deleted file mode 100644 index 199ebf9f48..0000000000 --- a/console_libraries/menu.lib +++ /dev/null @@ -1,82 +0,0 @@ -{{/* vim: set ft=html: */}} - -{{/* Navbar, should be passed . */}} -{{ define "navbar" }} - -{{ end }} - -{{/* LHS menu, should be passed . */}} -{{ define "menu" }} -
-{{ end }} - -{{/* Helper, pass (args . path name) */}} -{{ define "_menuItem" }} - -{{ end }} - diff --git a/console_libraries/prom.lib b/console_libraries/prom.lib deleted file mode 100644 index d7d436f947..0000000000 --- a/console_libraries/prom.lib +++ /dev/null @@ -1,138 +0,0 @@ -{{/* vim: set ft=html: */}} -{{/* Load Prometheus console library JS/CSS. Should go in */}} -{{ define "prom_console_head" }} - - - - - - - - - - - - - -{{ end }} - -{{/* Top of all pages. */}} -{{ define "head" -}} - - - -{{ template "prom_console_head" }} - - -{{ template "navbar" . }} - -{{ template "menu" . }} -{{ end }} - -{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }} -{{ define "humanize" }}{{ humanize . }}{{ end }} -{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }} -{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }} -{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }} -{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }} -{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }} -{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }} -{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }} - -{{/* prom_query_drilldown (args expr suffix? renderTemplate?) -Displays the result of the expression, with a link to /graph for it. - -renderTemplate is the name of the template to use to render the value. -*/}} -{{ define "prom_query_drilldown" }} -{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }} -{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }} -{{ end }} - -{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&{{ end }}{{ end }}" - -{{ define "prom_right_table_head" }} -
- -{{ end }} -{{ define "prom_right_table_tail" }} -
-
-{{ end }} - -{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}} -{{ define "prom_right_table_job_head" }} - - {{ . }} - {{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }} - - - CPU - {{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }} - - - Memory - {{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }} - -{{ end }} - - -{{ define "prom_content_head" }} -
-
-{{ template "prom_graph_timecontrol" . }} -{{ end }} -{{ define "prom_content_tail" }} -
-
-{{ end }} - -{{ define "prom_graph_timecontrol" }} -
-{{ end }} - -{{/* Bottom of all pages. */}} -{{ define "tail" }} - - -{{ end }} diff --git a/consoles/index.html.example b/consoles/index.html.example deleted file mode 100644 index c725d30dea..0000000000 --- a/consoles/index.html.example +++ /dev/null @@ -1,28 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Overview

-

These are example consoles for Prometheus.

- -

These consoles expect exporters to have the following job labels:

Exporter | Job label
Node Exporter | node
Prometheus | prometheus
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-cpu.html b/consoles/node-cpu.html deleted file mode 100644 index 284ad738f2..0000000000 --- a/consoles/node-cpu.html +++ /dev/null @@ -1,60 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }} - -{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }} - - {{ .Labels.mode | title }} CPU - {{ .Value | printf "%.1f" }}% - -{{ end }} - Misc - - Processes Running - {{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }} - - - Processes Blocked - {{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }} - - - Forks - {{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - Context Switches - {{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - Interrupts - {{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} - - - 1m Loadavg - {{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }} - - - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}


CPU Usage

-
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-disk.html b/consoles/node-disk.html deleted file mode 100644 index ffff41b797..0000000000 --- a/consoles/node-disk.html +++ /dev/null @@ -1,78 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Disks - -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }} - {{ .Labels.device }} - - Utilization - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} - - - Throughput - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} - - - Avg Read Time - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} - - - Avg Write Time - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} - -{{ end }} - - Filesystem Fullness - -{{ define "roughlyNearZero" }} -{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} -{{ end }} -{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} - - {{ .Labels.mountpoint }} - {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} - -{{ end }} - - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}


Disk I/O Utilization

-
- -

Filesystem Usage

-
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node-overview.html b/consoles/node-overview.html deleted file mode 100644 index 4ae8984b99..0000000000 --- a/consoles/node-overview.html +++ /dev/null @@ -1,121 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - Overview - - User CPU - {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} - - - System CPU - {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} - - - Memory Total - {{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Memory Free - {{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Network - -{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Received - {{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} - - - {{ .Labels.device }} Transmitted - {{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} - -{{ end }} - - Disks - -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Utilization - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} - -{{ end }} -{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }} - - {{ .Labels.device }} Throughput - {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} - -{{ end }} - - Filesystem Fullness - -{{ define "roughlyNearZero" }} -{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} -{{ end }} -{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} - - {{ .Labels.mountpoint }} - {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} - -{{ end }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}


CPU Usage

-
- - -

Disk I/O Utilization

-
- - -

Memory

-
- - -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/node.html b/consoles/node.html deleted file mode 100644 index c1dfc1a891..0000000000 --- a/consoles/node.html +++ /dev/null @@ -1,35 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Node - {{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Node

- - - - - - - - -{{ range query "up{job='node'}" | sortByLabel "instance" }} - - - Yes{{ else }} class="alert-danger">No{{ end }} - - - -{{ else }} - -{{ end }} -
Node | Up | CPU Used | Memory Available
{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}
No nodes found.
- - -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/prometheus-overview.html b/consoles/prometheus-overview.html deleted file mode 100644 index 08e027de06..0000000000 --- a/consoles/prometheus-overview.html +++ /dev/null @@ -1,96 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Overview - - - CPU - {{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }} - - - Memory - {{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }} - - - Version - {{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{. | first | label "version"}}{{end}} - - - - Storage - - - Ingested Samples - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }} - - - Head Series - {{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - Blocks Loaded - {{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - Rules - - - Evaluation Duration - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }} - - - Notification Latency - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }} - - - Notification Queue - {{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }} - - - HTTP Server - -{{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }} - - {{ .Labels.handler }} - {{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }} - -{{ end }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -
-

Prometheus Overview - {{ .Params.instance }}

- -

Ingested Samples

-
- - -

HTTP Server

-
- -
-{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/consoles/prometheus.html b/consoles/prometheus.html deleted file mode 100644 index e0d026376d..0000000000 --- a/consoles/prometheus.html +++ /dev/null @@ -1,34 +0,0 @@ -{{ template "head" . }} - -{{ template "prom_right_table_head" }} - - Prometheus - {{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }} - -{{ template "prom_right_table_tail" }} - -{{ template "prom_content_head" . }} -

Prometheus

- - - - - - - - -{{ range query "up{job='prometheus'}" | sortByLabel "instance" }} - - - - - - -{{ else }} - -{{ end }} -
Prometheus | Up | Ingested Samples | Memory
{{ .Labels.instance }}Yes{{ else }} class="alert-danger">No{{ end }}{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024")}}
No devices found.
- -{{ template "prom_content_tail" . }} - -{{ template "tail" }} diff --git a/discovery/README.md b/discovery/README.md index 4c06608625..d5418e7fb1 100644 --- a/discovery/README.md +++ b/discovery/README.md @@ -233,7 +233,7 @@ type Config interface { } type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger // A registerer for the Discoverer's metrics. Registerer prometheus.Registerer diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index a44912481a..7e35a1807f 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" @@ -29,11 +30,11 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -100,7 +101,7 @@ type EC2SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*EC2SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*EC2SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ec2Metrics{ refreshMetrics: rmi, } @@ -146,9 +147,9 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // the Discoverer interface. type EC2Discovery struct { *refresh.Discovery - logger log.Logger + logger *slog.Logger cfg *EC2SDConfig - ec2 *ec2.EC2 + ec2 ec2iface.EC2API // azToAZID maps this account's availability zones to their underlying AZ // ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so @@ -157,14 +158,14 @@ type EC2Discovery struct { } // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. -func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { +func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { m, ok := metrics.(*ec2Metrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } d := &EC2Discovery{ logger: logger, @@ -182,7 +183,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.Dis return d, nil } -func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { +func (d *EC2Discovery) ec2Client(context.Context) (ec2iface.EC2API, error) { if d.ec2 != nil { return d.ec2, nil } @@ -254,14 +255,14 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error // Prometheus requires a reload if AWS adds a new AZ to the region. 
if d.azToAZID == nil { if err := d.refreshAZIDs(ctx); err != nil { - level.Debug(d.logger).Log( - "msg", "Unable to describe availability zones", + d.logger.Debug( + "Unable to describe availability zones", "err", err) } } input := &ec2.DescribeInstancesInput{Filters: filters} - if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool { + if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, _ bool) bool { for _, r := range p.Reservations { for _, inst := range r.Instances { if inst.PrivateIpAddress == nil { @@ -296,8 +297,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone) azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone] if !ok && d.azToAZID != nil { - level.Debug(d.logger).Log( - "msg", "Availability zone ID not found", + d.logger.Debug( + "Availability zone ID not found", "az", *inst.Placement.AvailabilityZone) } labels[ec2LabelAZID] = model.LabelValue(azID) diff --git a/discovery/aws/ec2_test.go b/discovery/aws/ec2_test.go new file mode 100644 index 0000000000..2955e0e02e --- /dev/null +++ b/discovery/aws/ec2_test.go @@ -0,0 +1,434 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "context" + "errors" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "go.uber.org/goleak" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +// Helper function to get pointers to literals. +// NOTE: this is common between a few tests. In the future it might be worth moving this out into a separate package. +func strptr(str string) *string { + return &str +} + +func boolptr(b bool) *bool { + return &b +} + +func int64ptr(i int64) *int64 { + return &i +} + +// Struct for test data. +type ec2DataStore struct { + region string + + azToAZID map[string]string + + ownerID string + + instances []*ec2.Instance +} + +// The tests themselves.
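Changing the client field from the concrete *ec2.EC2 to the ec2iface.EC2API interface (see the ec2.go hunk above) is what makes the tests that follow possible: any value satisfying the interface can be injected into EC2Discovery. A minimal sketch of the embedding trick the mock at the bottom of this file relies on; stubEC2 is a hypothetical name, not part of the patch:

package aws

import "github.com/aws/aws-sdk-go/service/ec2/ec2iface"

// stubEC2 embeds the interface, so it satisfies ec2iface.EC2API while only
// overriding the methods a test actually exercises; calling anything else
// panics on the nil embedded interface, which keeps tests honest.
type stubEC2 struct {
	ec2iface.EC2API
}

// Compile-time assertion that the stub can be assigned to the field type
// EC2Discovery now holds.
var _ ec2iface.EC2API = stubEC2{}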
+func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} + +func TestEC2DiscoveryRefreshAZIDs(t *testing.T) { + ctx := context.Background() + + // iterate through the test cases + for _, tt := range []struct { + name string + shouldFail bool + ec2Data *ec2DataStore + }{ + { + name: "Normal", + shouldFail: false, + ec2Data: &ec2DataStore{ + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + }, + }, + { + name: "HandleError", + shouldFail: true, + ec2Data: &ec2DataStore{}, + }, + } { + t.Run(tt.name, func(t *testing.T) { + client := newMockEC2Client(tt.ec2Data) + + d := &EC2Discovery{ + ec2: client, + } + + err := d.refreshAZIDs(ctx) + if tt.shouldFail { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, client.ec2Data.azToAZID, d.azToAZID) + } + }) + } +} + +func TestEC2DiscoveryRefresh(t *testing.T) { + ctx := context.Background() + + // iterate through the test cases + for _, tt := range []struct { + name string + ec2Data *ec2DataStore + expected []*targetgroup.Group + }{ + { + name: "NoPrivateIp", + ec2Data: &ec2DataStore{ + region: "region-noprivateip", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + instances: []*ec2.Instance{ + { + InstanceId: strptr("instance-id-noprivateip"), + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-noprivateip", + }, + }, + }, + { + name: "NoVpc", + ec2Data: &ec2DataStore{ + region: "region-novpc", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + ownerID: "owner-id-novpc", + instances: []*ec2.Instance{ + { + // set every possible options and test them here + Architecture: strptr("architecture-novpc"), + ImageId: strptr("ami-novpc"), + InstanceId: strptr("instance-id-novpc"), + InstanceLifecycle: strptr("instance-lifecycle-novpc"), + InstanceType: strptr("instance-type-novpc"), + Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")}, + Platform: strptr("platform-novpc"), + PrivateDnsName: strptr("private-dns-novpc"), + PrivateIpAddress: strptr("1.2.3.4"), + PublicDnsName: strptr("public-dns-novpc"), + PublicIpAddress: strptr("42.42.42.2"), + State: &ec2.InstanceState{Name: strptr("running")}, + // test tags once and for all + Tags: []*ec2.Tag{ + {Key: strptr("tag-1-key"), Value: strptr("tag-1-value")}, + {Key: strptr("tag-2-key"), Value: strptr("tag-2-value")}, + nil, + {Value: strptr("tag-4-value")}, + {Key: strptr("tag-5-key")}, + }, + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-novpc", + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue("1.2.3.4:4242"), + "__meta_ec2_ami": model.LabelValue("ami-novpc"), + "__meta_ec2_architecture": model.LabelValue("architecture-novpc"), + "__meta_ec2_availability_zone": model.LabelValue("azname-b"), + "__meta_ec2_availability_zone_id": model.LabelValue("azid-2"), + "__meta_ec2_instance_id": model.LabelValue("instance-id-novpc"), + "__meta_ec2_instance_lifecycle": model.LabelValue("instance-lifecycle-novpc"), + "__meta_ec2_instance_type": model.LabelValue("instance-type-novpc"), + "__meta_ec2_instance_state": model.LabelValue("running"), + "__meta_ec2_owner_id": model.LabelValue("owner-id-novpc"), + "__meta_ec2_platform": model.LabelValue("platform-novpc"), + "__meta_ec2_private_dns_name": model.LabelValue("private-dns-novpc"), + "__meta_ec2_private_ip": model.LabelValue("1.2.3.4"), + "__meta_ec2_public_dns_name": 
model.LabelValue("public-dns-novpc"), + "__meta_ec2_public_ip": model.LabelValue("42.42.42.2"), + "__meta_ec2_region": model.LabelValue("region-novpc"), + "__meta_ec2_tag_tag_1_key": model.LabelValue("tag-1-value"), + "__meta_ec2_tag_tag_2_key": model.LabelValue("tag-2-value"), + }, + }, + }, + }, + }, + { + name: "Ipv4", + ec2Data: &ec2DataStore{ + region: "region-ipv4", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + instances: []*ec2.Instance{ + { + // just the minimum needed for the refresh work + ImageId: strptr("ami-ipv4"), + InstanceId: strptr("instance-id-ipv4"), + InstanceType: strptr("instance-type-ipv4"), + Placement: &ec2.Placement{AvailabilityZone: strptr("azname-c")}, + PrivateIpAddress: strptr("5.6.7.8"), + State: &ec2.InstanceState{Name: strptr("running")}, + SubnetId: strptr("azid-3"), + VpcId: strptr("vpc-ipv4"), + // network interfaces + NetworkInterfaces: []*ec2.InstanceNetworkInterface{ + // interface without subnet -> should be ignored + { + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:1::1"), + IsPrimaryIpv6: boolptr(true), + }, + }, + }, + // interface with subnet, no IPv6 + { + Ipv6Addresses: []*ec2.InstanceIpv6Address{}, + SubnetId: strptr("azid-3"), + }, + // interface with another subnet, no IPv6 + { + Ipv6Addresses: []*ec2.InstanceIpv6Address{}, + SubnetId: strptr("azid-1"), + }, + }, + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-ipv4", + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue("5.6.7.8:4242"), + "__meta_ec2_ami": model.LabelValue("ami-ipv4"), + "__meta_ec2_availability_zone": model.LabelValue("azname-c"), + "__meta_ec2_availability_zone_id": model.LabelValue("azid-3"), + "__meta_ec2_instance_id": model.LabelValue("instance-id-ipv4"), + "__meta_ec2_instance_state": model.LabelValue("running"), + "__meta_ec2_instance_type": model.LabelValue("instance-type-ipv4"), + "__meta_ec2_owner_id": model.LabelValue(""), + "__meta_ec2_primary_subnet_id": model.LabelValue("azid-3"), + "__meta_ec2_private_ip": model.LabelValue("5.6.7.8"), + "__meta_ec2_region": model.LabelValue("region-ipv4"), + "__meta_ec2_subnet_id": model.LabelValue(",azid-3,azid-1,"), + "__meta_ec2_vpc_id": model.LabelValue("vpc-ipv4"), + }, + }, + }, + }, + }, + { + name: "Ipv6", + ec2Data: &ec2DataStore{ + region: "region-ipv6", + azToAZID: map[string]string{ + "azname-a": "azid-1", + "azname-b": "azid-2", + "azname-c": "azid-3", + }, + instances: []*ec2.Instance{ + { + // just the minimum needed for the refresh work + ImageId: strptr("ami-ipv6"), + InstanceId: strptr("instance-id-ipv6"), + InstanceType: strptr("instance-type-ipv6"), + Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")}, + PrivateIpAddress: strptr("9.10.11.12"), + State: &ec2.InstanceState{Name: strptr("running")}, + SubnetId: strptr("azid-2"), + VpcId: strptr("vpc-ipv6"), + // network interfaces + NetworkInterfaces: []*ec2.InstanceNetworkInterface{ + // interface without primary IPv6, index 2 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(3), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:2::1:1"), + IsPrimaryIpv6: boolptr(false), + }, + }, + SubnetId: strptr("azid-2"), + }, + // interface with primary IPv6, index 1 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(1), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:2::2:1"), + 
IsPrimaryIpv6: boolptr(false), + }, + { + Ipv6Address: strptr("2001:db8:2::2:2"), + IsPrimaryIpv6: boolptr(true), + }, + }, + SubnetId: strptr("azid-2"), + }, + // interface with primary IPv6, index 3 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(3), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{ + { + Ipv6Address: strptr("2001:db8:2::3:1"), + IsPrimaryIpv6: boolptr(true), + }, + }, + SubnetId: strptr("azid-1"), + }, + // interface without primary IPv6, index 0 + { + Attachment: &ec2.InstanceNetworkInterfaceAttachment{ + DeviceIndex: int64ptr(0), + }, + Ipv6Addresses: []*ec2.InstanceIpv6Address{}, + SubnetId: strptr("azid-3"), + }, + }, + }, + }, + }, + expected: []*targetgroup.Group{ + { + Source: "region-ipv6", + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue("9.10.11.12:4242"), + "__meta_ec2_ami": model.LabelValue("ami-ipv6"), + "__meta_ec2_availability_zone": model.LabelValue("azname-b"), + "__meta_ec2_availability_zone_id": model.LabelValue("azid-2"), + "__meta_ec2_instance_id": model.LabelValue("instance-id-ipv6"), + "__meta_ec2_instance_state": model.LabelValue("running"), + "__meta_ec2_instance_type": model.LabelValue("instance-type-ipv6"), + "__meta_ec2_ipv6_addresses": model.LabelValue(",2001:db8:2::1:1,2001:db8:2::2:1,2001:db8:2::2:2,2001:db8:2::3:1,"), + "__meta_ec2_owner_id": model.LabelValue(""), + "__meta_ec2_primary_ipv6_addresses": model.LabelValue(",,2001:db8:2::2:2,,2001:db8:2::3:1,"), + "__meta_ec2_primary_subnet_id": model.LabelValue("azid-2"), + "__meta_ec2_private_ip": model.LabelValue("9.10.11.12"), + "__meta_ec2_region": model.LabelValue("region-ipv6"), + "__meta_ec2_subnet_id": model.LabelValue(",azid-2,azid-1,azid-3,"), + "__meta_ec2_vpc_id": model.LabelValue("vpc-ipv6"), + }, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + client := newMockEC2Client(tt.ec2Data) + + d := &EC2Discovery{ + ec2: client, + cfg: &EC2SDConfig{ + Port: 4242, + Region: client.ec2Data.region, + }, + } + + g, err := d.refresh(ctx) + require.NoError(t, err) + require.Equal(t, tt.expected, g) + }) + } +} + +// EC2 client mock. 
+type mockEC2Client struct { + ec2iface.EC2API + ec2Data ec2DataStore +} + +func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client { + client := mockEC2Client{ + ec2Data: *ec2Data, + } + return &client +} + +func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(_ aws.Context, _ *ec2.DescribeAvailabilityZonesInput, _ ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) { + if len(m.ec2Data.azToAZID) == 0 { + return nil, errors.New("No AZs found") + } + + azs := make([]*ec2.AvailabilityZone, len(m.ec2Data.azToAZID)) + + i := 0 + for k, v := range m.ec2Data.azToAZID { + azs[i] = &ec2.AvailabilityZone{ + ZoneName: strptr(k), + ZoneId: strptr(v), + } + i++ + } + + return &ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: azs, + }, nil +} + +func (m *mockEC2Client) DescribeInstancesPagesWithContext(_ aws.Context, _ *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, _ ...request.Option) error { + r := ec2.Reservation{} + r.SetInstances(m.ec2Data.instances) + r.SetOwnerId(m.ec2Data.ownerID) + + o := ec2.DescribeInstancesOutput{} + o.SetReservations([]*ec2.Reservation{&r}) + + _ = fn(&o, true) + + return nil +} diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 0ad7f2d541..ff1059ede0 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" @@ -29,10 +30,10 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -82,7 +83,7 @@ type LightsailSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*LightsailSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*LightsailSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &lightsailMetrics{ refreshMetrics: rmi, } @@ -114,6 +115,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err region, err := metadata.Region() if err != nil { + //nolint:staticcheck // Capitalized first word. return errors.New("Lightsail SD configuration requires a region") } c.Region = region @@ -130,14 +132,14 @@ type LightsailDiscovery struct { } // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. 
-func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { +func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { m, ok := metrics.(*lightsailMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } d := &LightsailDiscovery{ diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 70d95b9f3a..670afb5a4e 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "math/rand" "net" "net/http" @@ -35,12 +36,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -70,18 +69,14 @@ const ( authMethodManagedIdentity = "ManagedIdentity" ) -var ( - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - - // DefaultSDConfig is the default Azure SD configuration. - DefaultSDConfig = SDConfig{ - Port: 80, - RefreshInterval: model.Duration(5 * time.Minute), - Environment: "AzurePublicCloud", - AuthenticationMethod: authMethodOAuth, - HTTPClientConfig: config_util.DefaultHTTPClientConfig, - } -) +// DefaultSDConfig is the default Azure SD configuration. +var DefaultSDConfig = SDConfig{ + Port: 80, + RefreshInterval: model.Duration(5 * time.Minute), + Environment: "AzurePublicCloud", + AuthenticationMethod: authMethodOAuth, + HTTPClientConfig: config_util.DefaultHTTPClientConfig, +} var environments = map[string]cloud.Configuration{ "AZURECHINACLOUD": cloud.AzureChina, @@ -175,7 +170,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { type Discovery struct { *refresh.Discovery - logger log.Logger + logger *slog.Logger cfg *SDConfig port int cache *cache.Cache[string, *armnetwork.Interface] @@ -183,14 +178,14 @@ type Discovery struct { } // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. -func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*azureMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) d := &Discovery{ @@ -228,26 +223,26 @@ type azureClient struct { vm *armcompute.VirtualMachinesClient vmss *armcompute.VirtualMachineScaleSetsClient vmssvm *armcompute.VirtualMachineScaleSetVMsClient - logger log.Logger + logger *slog.Logger } var _ client = &azureClient{} -// createAzureClient is a helper function for creating an Azure compute client to ARM. 
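Editor's note: the constructor change above recurs throughout this diff: discoverers now take *slog.Logger instead of go-kit's log.Logger, substitute promslog.NewNopLogger() when nil is passed, and use errors.New rather than fmt.Errorf for static messages. A sketch of the new constructor shape (NewThing and Thing are made-up names, not part of the patch):

```go
package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

type Thing struct {
	logger *slog.Logger
}

// NewThing follows the convention this patch migrates to: accept *slog.Logger
// and fall back to a no-op logger so callers may pass nil.
func NewThing(logger *slog.Logger) *Thing {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	return &Thing{logger: logger}
}

func main() {
	t := NewThing(nil)
	// go-kit style was: level.Debug(logger).Log("msg", "refreshed", "count", 3)
	// slog's leveled methods take the message first, then key/value pairs.
	t.logger.Debug("refreshed", "count", 3)
}
```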
-func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { - cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment) +// createAzureClient is a helper method for creating an Azure compute client to ARM. +func (d *Discovery) createAzureClient() (client, error) { + cloudConfiguration, err := CloudConfigurationFromName(d.cfg.Environment) if err != nil { return &azureClient{}, err } var c azureClient - c.logger = logger + c.logger = d.logger telemetry := policy.TelemetryOptions{ - ApplicationID: userAgent, + ApplicationID: version.PrometheusUserAgent(), } - credential, err := newCredential(cfg, policy.ClientOptions{ + credential, err := newCredential(*d.cfg, policy.ClientOptions{ Cloud: cloudConfiguration, Telemetry: telemetry, }) @@ -255,7 +250,7 @@ func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { return &azureClient{}, err } - client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd") + client, err := config_util.NewClientFromConfig(d.cfg.HTTPClientConfig, "azure_sd") if err != nil { return &azureClient{}, err } @@ -267,22 +262,22 @@ func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { }, } - c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options) + c.vm, err = armcompute.NewVirtualMachinesClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } - c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options) + c.nic, err = armnetwork.NewInterfacesClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } - c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options) + c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } - c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options) + c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(d.cfg.SubscriptionID, credential, options) if err != nil { return &azureClient{}, err } @@ -337,35 +332,27 @@ type virtualMachine struct { } // Create a new azureResource object from an ID string. 
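Editor's note: another repeated substitution visible above is the user agent: the hand-rolled fmt.Sprintf("Prometheus/%s", version.Version) gives way to version.PrometheusUserAgent(). Judging by the call sites it replaces, the helper yields the same "Prometheus/<version>" string; that equivalence is an inference from the diff, not something stated in it. A side-by-side sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

// The replacements in this patch suggest version.PrometheusUserAgent()
// produces the string previously assembled inline; this sketch only shows
// the old and new spellings next to each other.
func main() {
	old := fmt.Sprintf("Prometheus/%s", version.Version)
	fmt.Println(old)
	fmt.Println(version.PrometheusUserAgent())
}
```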
-func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) { +func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } resourceID, err := arm.ParseResourceID(id) if err != nil { err := fmt.Errorf("invalid ID '%s': %w", id, err) - level.Error(logger).Log("err", err) + logger.Error("Failed to parse resource ID", "err", err) return &arm.ResourceID{}, err } return resourceID, nil } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - defer level.Debug(d.logger).Log("msg", "Azure discovery completed") - - client, err := createAzureClient(*d.cfg, d.logger) - if err != nil { - d.metrics.failuresCount.Inc() - return nil, fmt.Errorf("could not create Azure client: %w", err) - } - +func (d *Discovery) refreshAzureClient(ctx context.Context, client client) ([]*targetgroup.Group, error) { machines, err := client.getVMs(ctx, d.cfg.ResourceGroup) if err != nil { d.metrics.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machines: %w", err) } - level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines)) + d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines)) // Load the vms managed by scale sets. scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup) @@ -418,6 +405,18 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { return []*targetgroup.Group{&tg}, nil } +func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + defer d.logger.Debug("Azure discovery completed") + + client, err := d.createAzureClient() + if err != nil { + d.metrics.failuresCount.Inc() + return nil, fmt.Errorf("could not create Azure client: %w", err) + } + + return d.refreshAzureClient(ctx, client) +} + func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) { r, err := newAzureResourceFromID(vm.ID, d.logger) if err != nil { @@ -458,11 +457,10 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID) } if err != nil { - if errors.Is(err, errorNotFound) { - level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) - } else { + if !errors.Is(err, errorNotFound) { return nil, err } + d.logger.Warn("Network interface does not exist", "name", nicID, "err", err) // Get out of this routine because we cannot continue without a network interface. return nil, nil } @@ -480,7 +478,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM // yet support this. On deallocated machines, this value happens to be nil so it // is a cheap and easy way to determine if a machine is allocated or not. 
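Editor's note: splitting refresh into createAzureClient plus refreshAzureClient (above) creates a test seam: the discovery logic now accepts the client interface directly, so the new tests further down can hand it a fake instead of dialing Azure. The shape of that refactor, reduced to a toy where every name is a stand-in:

```go
package main

import (
	"context"
	"fmt"
)

type client interface {
	getVMs(ctx context.Context) ([]string, error)
}

type discovery struct {
	newClient func() (client, error) // stands in for createAzureClient
}

// refreshWithClient holds all the interesting logic and takes the client as
// a parameter, which is exactly what makes it testable with a fake.
func (d *discovery) refreshWithClient(ctx context.Context, c client) ([]string, error) {
	return c.getVMs(ctx)
}

// refresh only builds the real client and delegates.
func (d *discovery) refresh(ctx context.Context) ([]string, error) {
	c, err := d.newClient()
	if err != nil {
		return nil, fmt.Errorf("could not create client: %w", err)
	}
	return d.refreshWithClient(ctx, c)
}

type fakeClient struct{ vms []string }

func (f *fakeClient) getVMs(context.Context) ([]string, error) { return f.vms, nil }

func main() {
	d := &discovery{}
	got, _ := d.refreshWithClient(context.Background(), &fakeClient{vms: []string{"vm1"}})
	fmt.Println(got) // [vm1]
}
```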
if networkInterface.Properties.Primary == nil { - level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name) + d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name) return nil, nil } @@ -724,7 +722,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) { rs := time.Duration(random) * time.Second exptime := time.Duration(d.cfg.RefreshInterval*10) + rs d.cache.Set(nicID, netInt, cache.WithExpiration(exptime)) - level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds()) + d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds()) } // getFromCache will get the network Interface for the specified nicID diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 32dab66c8c..d7141561e2 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -15,19 +15,34 @@ package azure import ( "context" - "fmt" + "log/slog" + "net/http" + "slices" + "strings" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + fakenetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" - "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" ) +const defaultMockNetworkID string = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}" + func TestMain(m *testing.M) { goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"), @@ -96,13 +111,12 @@ func TestVMToLabelSet(t *testing.T) { vmType := "type" location := "westeurope" computerName := "computer_name" - networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1" ipAddress := "10.20.30.40" primary := true networkProfile := armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { - ID: &networkID, + ID: to.Ptr(defaultMockNetworkID), Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary}, }, }, @@ -139,7 +153,7 @@ func TestVMToLabelSet(t *testing.T) { Location: location, OsType: "Linux", Tags: map[string]*string{}, - NetworkInterfaces: []string{networkID}, + NetworkInterfaces: []string{defaultMockNetworkID}, Size: size, } @@ -150,11 +164,12 @@ func TestVMToLabelSet(t *testing.T) { cfg := DefaultSDConfig d := &Discovery{ cfg: &cfg, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))), } network := armnetwork.Interface{ - Name: &networkID, + Name: to.Ptr(defaultMockNetworkID), + ID: to.Ptr(defaultMockNetworkID), Properties: &armnetwork.InterfacePropertiesFormat{ Primary: &primary, IPConfigurations: 
[]*armnetwork.InterfaceIPConfiguration{ @@ -164,9 +179,9 @@ func TestVMToLabelSet(t *testing.T) { }, }, } - client := &mockAzureClient{ - networkInterface: &network, - } + + client := createMockAzureClient(t, nil, nil, nil, network, nil) + labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM) require.NoError(t, err) require.Len(t, labelSet, 11) @@ -475,34 +490,372 @@ func TestNewAzureResourceFromID(t *testing.T) { } } +func TestAzureRefresh(t *testing.T) { + tests := []struct { + scenario string + vmResp []armcompute.VirtualMachinesClientListAllResponse + vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse + vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse + interfacesResp armnetwork.Interface + expectedTG []*targetgroup.Group + }{ + { + scenario: "VMs, VMSS and VMSSVMs in Multiple Responses", + vmResp: []armcompute.VirtualMachinesClientListAllResponse{ + { + VirtualMachineListResult: armcompute.VirtualMachineListResult{ + Value: []*armcompute.VirtualMachine{ + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1"), to.Ptr("vm1")), + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2"), to.Ptr("vm2")), + }, + }, + }, + { + VirtualMachineListResult: armcompute.VirtualMachineListResult{ + Value: []*armcompute.VirtualMachine{ + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3"), to.Ptr("vm3")), + defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4"), to.Ptr("vm4")), + }, + }, + }, + }, + vmssResp: []armcompute.VirtualMachineScaleSetsClientListAllResponse{ + { + VirtualMachineScaleSetListWithLinkResult: armcompute.VirtualMachineScaleSetListWithLinkResult{ + Value: []*armcompute.VirtualMachineScaleSet{ + { + ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1"), + Name: to.Ptr("vmScaleSet1"), + Location: to.Ptr("australiaeast"), + Type: to.Ptr("Microsoft.Compute/virtualMachineScaleSets"), + }, + }, + }, + }, + }, + vmssvmResp: []armcompute.VirtualMachineScaleSetVMsClientListResponse{ + { + VirtualMachineScaleSetVMListResult: armcompute.VirtualMachineScaleSetVMListResult{ + Value: []*armcompute.VirtualMachineScaleSetVM{ + defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1"), to.Ptr("vmScaleSet1_vm1")), + defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2"), to.Ptr("vmScaleSet1_vm2")), + }, + }, + }, + }, + interfacesResp: armnetwork.Interface{ + ID: to.Ptr(defaultMockNetworkID), + Properties: &armnetwork.InterfacePropertiesFormat{ + Primary: to.Ptr(true), + IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ + {Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ + PrivateIPAddress: to.Ptr("10.0.0.1"), + }}, + }, + }, + }, + expectedTG: []*targetgroup.Group{ + { + Targets: 
[]model.LabelSet{ + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm1", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm2", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm3", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vm4", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vmScaleSet1_vm1", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_scale_set": "vmScaleSet1", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + { + "__address__": "10.0.0.1:80", + "__meta_azure_machine_computer_name": "computer_name", + "__meta_azure_machine_id": 
"/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2", + "__meta_azure_machine_location": "australiaeast", + "__meta_azure_machine_name": "vmScaleSet1_vm2", + "__meta_azure_machine_os_type": "Linux", + "__meta_azure_machine_private_ip": "10.0.0.1", + "__meta_azure_machine_resource_group": "{resourceGroup}", + "__meta_azure_machine_scale_set": "vmScaleSet1", + "__meta_azure_machine_size": "size", + "__meta_azure_machine_tag_prometheus": "", + "__meta_azure_subscription_id": "", + "__meta_azure_tenant_id": "", + }, + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.scenario, func(t *testing.T) { + t.Parallel() + azureSDConfig := &DefaultSDConfig + + azureClient := createMockAzureClient(t, tc.vmResp, tc.vmssResp, tc.vmssvmResp, tc.interfacesResp, nil) + + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := azureSDConfig.NewDiscovererMetrics(reg, refreshMetrics) + + sd, err := NewDiscovery(azureSDConfig, nil, metrics) + require.NoError(t, err) + + tg, err := sd.refreshAzureClient(context.Background(), azureClient) + require.NoError(t, err) + + sortTargetsByID(tg[0].Targets) + require.Equal(t, tc.expectedTG, tg) + }) + } +} + type mockAzureClient struct { - networkInterface *armnetwork.Interface + azureClient } -var _ client = &mockAzureClient{} +func createMockAzureClient(t *testing.T, vmResp []armcompute.VirtualMachinesClientListAllResponse, vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse, vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse, interfaceResp armnetwork.Interface, logger *slog.Logger) client { + t.Helper() + mockVMServer := defaultMockVMServer(vmResp) + mockVMSSServer := defaultMockVMSSServer(vmssResp) + mockVMScaleSetVMServer := defaultMockVMSSVMServer(vmssvmResp) + mockInterfaceServer := defaultMockInterfaceServer(interfaceResp) -func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) { - return nil, nil -} + vmClient, err := armcompute.NewVirtualMachinesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: fake.NewVirtualMachinesServerTransport(&mockVMServer), + }, + }) + require.NoError(t, err) -func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) { - return nil, nil -} + vmssClient, err := armcompute.NewVirtualMachineScaleSetsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: fake.NewVirtualMachineScaleSetsServerTransport(&mockVMSSServer), + }, + }) + require.NoError(t, err) -func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) { - return nil, nil -} + vmssvmClient, err := armcompute.NewVirtualMachineScaleSetVMsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: fake.NewVirtualMachineScaleSetVMsServerTransport(&mockVMScaleSetVMServer), + }, + }) + require.NoError(t, err) -func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) { - if networkInterfaceID == "" { - return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty") + interfacesClient, err := 
armnetwork.NewInterfacesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: fakenetwork.NewInterfacesServerTransport(&mockInterfaceServer), + }, + }) + require.NoError(t, err) + + return &mockAzureClient{ + azureClient: azureClient{ + vm: vmClient, + vmss: vmssClient, + vmssvm: vmssvmClient, + nic: interfacesClient, + logger: logger, + }, } - return m.networkInterface, nil } -func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) { - if scaleSetName == "" { - return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty") +func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.InterfacesServer { + return fakenetwork.InterfacesServer{ + Get: func(_ context.Context, _, _ string, _ *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) { + resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: interfaceResp}, nil) + return + }, + GetVirtualMachineScaleSetNetworkInterface: func(_ context.Context, _, _, _, _ string, _ *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) { + resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{Interface: interfaceResp}, nil) + return + }, } - return m.networkInterface, nil +} + +func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllResponse) fake.VirtualMachinesServer { + return fake.VirtualMachinesServer{ + NewListAllPager: func(_ *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) { + for _, page := range vmResp { + resp.AddPage(http.StatusOK, page, nil) + } + return + }, + } +} + +func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse) fake.VirtualMachineScaleSetsServer { + return fake.VirtualMachineScaleSetsServer{ + NewListAllPager: func(_ *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) { + for _, page := range vmssResp { + resp.AddPage(http.StatusOK, page, nil) + } + return + }, + } +} + +func defaultMockVMSSVMServer(vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse) fake.VirtualMachineScaleSetVMsServer { + return fake.VirtualMachineScaleSetVMsServer{ + NewListPager: func(_, _ string, _ *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) { + for _, page := range vmssvmResp { + resp.AddPage(http.StatusOK, page, nil) + } + return + }, + } +} + +func defaultVMWithIDAndName(id, name *string) *armcompute.VirtualMachine { + vmSize := armcompute.VirtualMachineSizeTypes("size") + osType := armcompute.OperatingSystemTypesLinux + defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/testVM" + defaultName := "testVM" + + if id == nil { + id = &defaultID + } + if name == nil { + name = &defaultName + } + + return &armcompute.VirtualMachine{ + ID: id, + Name: name, + Type: to.Ptr("Microsoft.Compute/virtualMachines"), 
+ Location: to.Ptr("australiaeast"), + Properties: &armcompute.VirtualMachineProperties{ + OSProfile: &armcompute.OSProfile{ + ComputerName: to.Ptr("computer_name"), + }, + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, + }, + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + { + ID: to.Ptr(defaultMockNetworkID), + }, + }, + }, + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, + }, + }, + Tags: map[string]*string{ + "prometheus": new(string), + }, + } +} + +func defaultVMSSVMWithIDAndName(id, name *string) *armcompute.VirtualMachineScaleSetVM { + vmSize := armcompute.VirtualMachineSizeTypes("size") + osType := armcompute.OperatingSystemTypesLinux + defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/testVMScaleSet/virtualMachines/testVM" + defaultName := "testVM" + + if id == nil { + id = &defaultID + } + if name == nil { + name = &defaultName + } + + return &armcompute.VirtualMachineScaleSetVM{ + ID: id, + Name: name, + Type: to.Ptr("Microsoft.Compute/virtualMachines"), + InstanceID: to.Ptr("123"), + Location: to.Ptr("australiaeast"), + Properties: &armcompute.VirtualMachineScaleSetVMProperties{ + OSProfile: &armcompute.OSProfile{ + ComputerName: to.Ptr("computer_name"), + }, + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + OSType: &osType, + }, + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + {ID: to.Ptr(defaultMockNetworkID)}, + }, + }, + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: &vmSize, + }, + }, + Tags: map[string]*string{ + "prometheus": new(string), + }, + } +} + +func sortTargetsByID(targets []model.LabelSet) { + slices.SortFunc(targets, func(a, b model.LabelSet) int { + return strings.Compare(string(a["__meta_azure_machine_id"]), string(b["__meta_azure_machine_id"])) + }) } diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index bdc1fc8dce..4c8de6e291 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -17,17 +17,18 @@ import ( "context" "errors" "fmt" + "log/slog" "net" + "slices" "strconv" "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" consul "github.com/hashicorp/consul/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -113,8 +114,11 @@ type SDConfig struct { Services []string `yaml:"services,omitempty"` // A list of tags used to filter instances inside a service. Services must contain all tags in the list. ServiceTags []string `yaml:"tags,omitempty"` - // Desired node metadata. + // Desired node metadata. As of Consul 1.14, consider `filter` instead. NodeMeta map[string]string `yaml:"node_meta,omitempty"` + // Consul filter string + // See https://www.consul.io/api-docs/catalog#filtering-1, for syntax + Filter string `yaml:"filter,omitempty"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } @@ -174,22 +178,23 @@ type Discovery struct { watchedServices []string // Set of services which will be discovered. watchedTags []string // Tags used to filter instances of a service. 
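Editor's note: the defaultMock*Server helpers in the Azure test file above drive the real armcompute/armnetwork clients through azfake server transports, so the client's request/response pipeline is genuine and only the wire is faked. Stripped of the Azure SDK, the underlying idea is just a swapped http.RoundTripper, as in this dependency-free sketch:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// fakeTransport answers every request in memory with a canned body,
// the same mechanism the azfake *ServerTransport types provide.
type fakeTransport struct{ body string }

func (f *fakeTransport) RoundTrip(*http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusOK,
		Body:       io.NopCloser(strings.NewReader(f.body)),
		Header:     http.Header{"Content-Type": []string{"application/json"}},
	}, nil
}

func main() {
	c := &http.Client{Transport: &fakeTransport{body: `{"value":[]}`}}
	resp, err := c.Get("https://management.azure.com/fake") // URL is a placeholder
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(b)) // 200 {"value":[]}
}
```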
watchedNodeMeta map[string]string + watchedFilter string allowStale bool refreshInterval time.Duration finalizer func() - logger log.Logger + logger *slog.Logger metrics *consulMetrics } // NewDiscovery returns a new Discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*consulMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout)) @@ -218,6 +223,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere watchedServices: conf.Services, watchedTags: conf.ServiceTags, watchedNodeMeta: conf.NodeMeta, + watchedFilter: conf.Filter, allowStale: conf.AllowStale, refreshInterval: time.Duration(conf.RefreshInterval), clientDatacenter: conf.Datacenter, @@ -236,22 +242,17 @@ func (d *Discovery) shouldWatch(name string, tags []string) bool { return d.shouldWatchFromName(name) && d.shouldWatchFromTags(tags) } -// shouldWatch returns whether the service of the given name should be watched based on its name. +// shouldWatchFromName returns whether the service of the given name should be watched based on its name. func (d *Discovery) shouldWatchFromName(name string) bool { // If there's no fixed set of watched services, we watch everything. if len(d.watchedServices) == 0 { return true } - for _, sn := range d.watchedServices { - if sn == name { - return true - } - } - return false + return slices.Contains(d.watchedServices, name) } -// shouldWatch returns whether the service of the given name should be watched based on its tags. +// shouldWatchFromTags returns whether the service of the given name should be watched based on its tags. // This gets called when the user doesn't specify a list of services in order to avoid watching // *all* services. Details in https://github.com/prometheus/prometheus/pull/3814 func (d *Discovery) shouldWatchFromTags(tags []string) bool { @@ -282,7 +283,7 @@ func (d *Discovery) getDatacenter() error { info, err := d.client.Agent().Self() if err != nil { - level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + d.logger.Error("Error retrieving datacenter name", "err", err) d.metrics.rpcFailuresCount.Inc() return err } @@ -290,12 +291,12 @@ func (d *Discovery) getDatacenter() error { dc, ok := info["Config"]["Datacenter"].(string) if !ok { err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"]) - level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + d.logger.Error("Error retrieving datacenter name", "err", err) return err } d.clientDatacenter = dc - d.logger = log.With(d.logger, "datacenter", dc) + d.logger = d.logger.With("datacenter", dc) return nil } @@ -361,13 +362,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { // entire list of services. 
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) { catalog := d.client.Catalog() - level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ",")) + d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ","), "filter", d.watchedFilter) opts := &consul.QueryOptions{ WaitIndex: *lastIndex, WaitTime: watchTimeout, AllowStale: d.allowStale, NodeMeta: d.watchedNodeMeta, + Filter: d.watchedFilter, } t0 := time.Now() srvs, meta, err := catalog.Services(opts.WithContext(ctx)) @@ -382,7 +384,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. } if err != nil { - level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err) + d.logger.Error("Error refreshing service list", "err", err) d.metrics.rpcFailuresCount.Inc() time.Sleep(retryInterval) return @@ -445,7 +447,7 @@ type consulService struct { discovery *Discovery client *consul.Client tagSeparator string - logger log.Logger + logger *slog.Logger rpcFailuresCount prometheus.Counter serviceRPCDuration prometheus.Observer } @@ -490,7 +492,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G // Get updates for a service. func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) { - level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ",")) + srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ",")) opts := &consul.QueryOptions{ WaitIndex: *lastIndex, @@ -513,7 +515,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr } if err != nil { - level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) + srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) srv.rpcFailuresCount.Inc() time.Sleep(retryInterval) return diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index e3bc7938f5..ea896ce31b 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -21,10 +21,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "gopkg.in/yaml.v2" @@ -252,6 +252,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { case "/v1/catalog/services?index=1&wait=120000ms": time.Sleep(5 * time.Second) response = ServicesTestAnswer + case "/v1/catalog/services?filter=NodeMeta.rack_name+%3D%3D+%222304%22&index=1&wait=120000ms": + response = ServicesTestAnswer default: t.Errorf("Unhandled consul call: %s", r.URL) } @@ -270,7 +272,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { } func newDiscovery(t *testing.T, config *SDConfig) *Discovery { - logger := log.NewNopLogger() + logger := promslog.NewNopLogger() metrics := NewTestMetrics(t, config, prometheus.NewRegistry()) @@ -369,6 +371,27 @@ func TestAllOptions(t *testing.T) { <-ch } +// Watch the test service with a specific tag and node-meta via Filter parameter. 
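Editor's note: the new `filter` option is handed to Consul's catalog query verbatim via the Filter field added to consul.QueryOptions above. Issued directly against the Consul API with hashicorp/consul/api, the expression used in the test below would look like this sketch (default client config assumed):

```go
package main

import (
	"fmt"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Same expression as TestFilterOption; catalog-endpoint filtering of
	// this form is what the config comment's Consul 1.14 note refers to.
	opts := &consul.QueryOptions{
		Filter: `NodeMeta.rack_name == "2304"`,
	}
	services, _, err := client.Catalog().Services(opts)
	if err != nil {
		panic(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags)
	}
}
```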
+func TestFilterOption(t *testing.T) { + stub, config := newServer(t) + defer stub.Close() + + config.Services = []string{"test"} + config.Filter = `NodeMeta.rack_name == "2304"` + config.Token = "fake-token" + + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go func() { + d.Run(ctx, ch) + close(ch) + }() + checkOneTarget(t, <-ch) + cancel() +} + func TestGetDatacenterShouldReturnError(t *testing.T) { for _, tc := range []struct { handler func(http.ResponseWriter, *http.Request) @@ -376,14 +399,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { }{ { // Define a handler that will return status 500. - handler: func(w http.ResponseWriter, r *http.Request) { + handler: func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) }, errMessage: "Unexpected response code: 500 ()", }, { // Define a handler that will return incorrect response. - handler: func(w http.ResponseWriter, r *http.Request) { + handler: func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte(`{"Config": {"Not-Datacenter": "test-dc"}}`)) }, errMessage: "invalid value '' for Config.Datacenter", @@ -402,14 +425,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { d := newDiscovery(t, config) // Should be empty if not initialized. - require.Equal(t, "", d.clientDatacenter) + require.Empty(t, d.clientDatacenter) err = d.getDatacenter() // An error should be returned. - require.Equal(t, tc.errMessage, err.Error()) + require.EqualError(t, err, tc.errMessage) // Should still be empty. - require.Equal(t, "", d.clientDatacenter) + require.Empty(t, d.clientDatacenter) } } diff --git a/discovery/consul/metrics.go b/discovery/consul/metrics.go index 8266e7cc60..b49509bd8f 100644 --- a/discovery/consul/metrics.go +++ b/discovery/consul/metrics.go @@ -31,7 +31,7 @@ type consulMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &consulMetrics{ rpcFailuresCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index ecee60cb1f..d0ececd9e9 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -15,7 +15,9 @@ package digitalocean import ( "context" + "errors" "fmt" + "log/slog" "net" "net/http" "strconv" @@ -23,7 +25,6 @@ import ( "time" "github.com/digitalocean/godo" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -64,7 +65,7 @@ func init() { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &digitaloceanMetrics{ refreshMetrics: rmi, } @@ -111,10 +112,10 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*digitaloceanMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ @@ -131,7 +132,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }, - godo.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), + godo.SetUserAgent(version.PrometheusUserAgent()), ) if err != nil { return nil, fmt.Errorf("error setting up digital ocean agent: %w", err) diff --git a/discovery/digitalocean/digitalocean_test.go b/discovery/digitalocean/digitalocean_test.go index 841b5ef977..a282225ac2 100644 --- a/discovery/digitalocean/digitalocean_test.go +++ b/discovery/digitalocean/digitalocean_test.go @@ -19,9 +19,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -57,7 +57,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) diff --git a/discovery/discoverer_metrics_noop.go b/discovery/discoverer_metrics_noop.go index 638317ace1..4321204b6c 100644 --- a/discovery/discoverer_metrics_noop.go +++ b/discovery/discoverer_metrics_noop.go @@ -13,7 +13,7 @@ package discovery -// Create a dummy metrics struct, because this SD doesn't have any metrics. +// NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics. type NoopDiscovererMetrics struct{} var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil) diff --git a/discovery/discovery.go b/discovery/discovery.go index a5826f8176..c400de3632 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -15,9 +15,9 @@ package discovery import ( "context" + "log/slog" "reflect" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -39,7 +39,7 @@ type Discoverer interface { Run(ctx context.Context, up chan<- []*targetgroup.Group) } -// Internal metrics of service discovery mechanisms. +// DiscovererMetrics are internal metrics of service discovery mechanisms. type DiscovererMetrics interface { Register() error Unregister() @@ -47,7 +47,7 @@ type DiscovererMetrics interface { // DiscovererOptions provides options for a Discoverer. type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger Metrics DiscovererMetrics @@ -56,7 +56,7 @@ type DiscovererOptions struct { HTTPClientOptions []config.HTTPClientOption } -// Metrics used by the "refresh" package. +// RefreshMetrics are used by the "refresh" package. // We define them here in the "discovery" package in order to avoid a cyclic dependency between // "discovery" and "refresh". type RefreshMetrics struct { @@ -64,17 +64,18 @@ type RefreshMetrics struct { Duration prometheus.Observer } -// Instantiate the metrics used by the "refresh" package. 
+// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package. type RefreshMetricsInstantiator interface { Instantiate(mech string) *RefreshMetrics } -// An interface for registering, unregistering, and instantiating metrics for the "refresh" package. -// Refresh metrics are registered and unregistered outside of the service discovery mechanism. -// This is so that the same metrics can be reused across different service discovery mechanisms. -// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are -// specific to that SD. However, doing so would also expose too many unused metrics on -// the Prometheus /metrics endpoint. +// RefreshMetricsManager is an interface for registering, unregistering, and +// instantiating metrics for the "refresh" package. Refresh metrics are +// registered and unregistered outside of the service discovery mechanism. This +// is so that the same metrics can be reused across different service discovery +// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to +// use const labels which are specific to that SD. However, doing so would also +// expose too many unused metrics on the Prometheus /metrics endpoint. type RefreshMetricsManager interface { DiscovererMetrics RefreshMetricsInstantiator @@ -108,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -123,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return staticDiscoverer(c), nil } -// No metrics are needed for this service discovery mechanism. +// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are +// needed for this service discovery mechanism. func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { return &NoopDiscovererMetrics{} } diff --git a/discovery/discovery_test.go b/discovery/discovery_test.go new file mode 100644 index 0000000000..af327195f2 --- /dev/null +++ b/discovery/discovery_test.go @@ -0,0 +1,36 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
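Editor's note: both Configs YAML methods above now call reflect.StructOf(configFields) directly rather than the getConfigType helper. The technique itself is unchanged: synthesize a struct type at runtime with one tagged field per registered SD mechanism, and let the YAML codec work on an instance of it. A toy version with a single made-up field:

```go
package main

import (
	"fmt"
	"reflect"
)

// Build a struct type at runtime from a field list, then instantiate it.
// The field here is invented; in the real code, configFields holds one
// entry per registered service discovery mechanism.
func main() {
	fields := []reflect.StructField{
		{
			Name: "StaticConfigs",
			Type: reflect.TypeOf([]string{}),
			Tag:  `yaml:"static_configs,omitempty"`,
		},
	}

	typ := reflect.StructOf(fields)
	ptr := reflect.New(typ)
	ptr.Elem().FieldByName("StaticConfigs").Set(reflect.ValueOf([]string{"foo:1234"}))

	fmt.Printf("%+v\n", ptr.Elem().Interface()) // {StaticConfigs:[foo:1234]}
}
```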
+ +package discovery + +import ( + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestConfigsCustomUnMarshalMarshal(t *testing.T) { + input := `static_configs: +- targets: + - foo:1234 + - bar:4321 +` + cfg := &Configs{} + err := yaml.UnmarshalStrict([]byte(input), cfg) + require.NoError(t, err) + + output, err := yaml.Marshal(cfg) + require.NoError(t, err) + require.Equal(t, input, string(output)) +} diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 314c3d38cd..405dba44f7 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -111,21 +111,21 @@ type Discovery struct { names []string port int qtype uint16 - logger log.Logger + logger *slog.Logger metrics *dnsMetrics - lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) + lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dnsMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } qtype := dns.TypeSRV @@ -174,7 +174,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { for _, name := range d.names { go func(n string) { if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) { - level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err) + d.logger.Error("Error refreshing DNS targets", "err", err) } wg.Done() }(name) @@ -238,7 +238,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ // CNAME responses can occur with "Type: A" dns_sd_config requests. continue default: - level.Warn(d.logger).Log("msg", "Invalid record", "record", record) + d.logger.Warn("Invalid record", "record", record) continue } tg.Targets = append(tg.Targets, model.LabelSet{ @@ -288,7 +288,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ // error will be generic-looking, because trying to return all the errors // returned by the combination of all name permutations and servers is a // nightmare. -func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { +func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { conf, err := dns.ClientConfigFromFile(resolvConf) if err != nil { return nil, fmt.Errorf("could not load resolv.conf: %w", err) @@ -337,14 +337,14 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms // A non-viable answer is "anything else", which encompasses both various // system-level problems (like network timeouts) and also // valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). 
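Editor's note: the doc comment just above describes lookupFromAnyServer's strategy, whose signature is diffed below. Compressed into a self-contained sketch (resolver list and port are placeholders, and error wording is mine), the try-each-server loop looks like:

```go
package main

import (
	"errors"
	"fmt"
	"log/slog"
	"net"

	"github.com/miekg/dns"
)

// lookupAny tries each configured resolver in order, logs failures at
// warning level, and returns the first viable answer.
func lookupAny(name string, qtype uint16, servers []string, logger *slog.Logger) (*dns.Msg, error) {
	client := &dns.Client{}
	msg := &dns.Msg{}
	msg.SetQuestion(dns.Fqdn(name), qtype)

	for _, server := range servers {
		resp, _, err := client.Exchange(msg, net.JoinHostPort(server, "53"))
		if err != nil {
			logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err)
			continue
		}
		return resp, nil
	}
	return nil, errors.New("could not resolve: no server responded")
}

func main() {
	msg, err := lookupAny("prometheus.io", dns.TypeA, []string{"192.0.2.1", "8.8.8.8"}, slog.Default())
	fmt.Println(msg != nil, err)
}
```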
-func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) { +func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) { client := &dns.Client{} for _, server := range conf.Servers { servAddr := net.JoinHostPort(server, conf.Port) msg, err := askServerForName(name, qtype, client, servAddr, true) if err != nil { - level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err) + logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err) continue } diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index 33a976827d..ea46ad3237 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -15,12 +15,12 @@ package dns import ( "context" - "fmt" + "errors" + "log/slog" "net" "testing" "time" - "github.com/go-kit/log" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -40,7 +40,7 @@ func TestDNS(t *testing.T) { testCases := []struct { name string config SDConfig - lookup func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) + lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) expected []*targetgroup.Group }{ @@ -52,8 +52,8 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { - return nil, fmt.Errorf("some error") + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { + return nil, errors.New("some error") }, expected: []*targetgroup.Group{}, }, @@ -65,7 +65,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.A{A: net.IPv4(192, 0, 2, 2)}, @@ -97,7 +97,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "AAAA", }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.AAAA{AAAA: net.IPv6loopback}, @@ -128,7 +128,7 @@ func TestDNS(t *testing.T) { Type: "SRV", RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -167,7 +167,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -198,7 +198,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{}, nil }, expected: []*targetgroup.Group{ @@ -215,7 +215,7 @@ func TestDNS(t *testing.T) { Port: 25, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ 
uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.MX{Preference: 0, Mx: "smtp1.example.com."}, diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go index 5a90968f1b..e4b54faae6 100644 --- a/discovery/eureka/client.go +++ b/discovery/eureka/client.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/common/version" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() type Applications struct { VersionsDelta int `xml:"versions__delta"` @@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode) } diff --git a/discovery/eureka/client_test.go b/discovery/eureka/client_test.go index 83f6fd5ff1..f85409a11e 100644 --- a/discovery/eureka/client_test.go +++ b/discovery/eureka/client_test.go @@ -172,7 +172,7 @@ func TestFetchApps(t *testing.T) { ` // Simulate apps with a valid XML response. - respHandler := func(w http.ResponseWriter, r *http.Request) { + respHandler := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, appsXML) @@ -199,7 +199,7 @@ func TestFetchApps(t *testing.T) { func Test500ErrorHttpResponse(t *testing.T) { // Simulate 500 error. - respHandler := func(w http.ResponseWriter, r *http.Request) { + respHandler := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 779c081aee..459b608e96 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -16,14 +16,13 @@ package eureka import ( "context" "errors" - "fmt" + "log/slog" "net" "net/http" "net/url" "strconv" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -78,7 +77,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &eurekaMetrics{ refreshMetrics: rmi, } @@ -126,10 +125,10 @@ type Discovery struct { } // NewDiscovery creates a new Eureka discovery for the given role. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*eurekaMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd") diff --git a/discovery/eureka/eureka_test.go b/discovery/eureka/eureka_test.go index b499410bfc..5ea9a6c74e 100644 --- a/discovery/eureka/eureka_test.go +++ b/discovery/eureka/eureka_test.go @@ -58,7 +58,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err func TestEurekaSDHandleError(t *testing.T) { var ( errTesting = "non 2xx status '500' response during eureka service discovery" - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) @@ -76,7 +76,7 @@ func TestEurekaSDEmptyList(t *testing.T) { 1 ` - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, appsXML) @@ -235,7 +235,7 @@ func TestEurekaSDSendGroup(t *testing.T) { ` - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, appsXML) diff --git a/discovery/file/file.go b/discovery/file/file.go index e7e9d0870f..beea03222b 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -26,12 +27,11 @@ import ( "time" "github.com/fsnotify/fsnotify" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" @@ -175,20 +175,20 @@ type Discovery struct { // and how many target groups they contained. // This is used to detect deleted target groups. lastRefresh map[string]int - logger log.Logger + logger *slog.Logger metrics *fileMetrics } // NewDiscovery returns a new file discovery for the given paths. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { fm, ok := metrics.(*fileMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } disc := &Discovery{ @@ -210,7 +210,7 @@ func (d *Discovery) listFiles() []string { for _, p := range d.paths { files, err := filepath.Glob(p) if err != nil { - level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err) + d.logger.Error("Error expanding glob", "glob", p, "err", err) continue } paths = append(paths, files...) 
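Editor's note: the file SD watch loop changed in the hunks around here is built on fsnotify's two-channel API: filesystem events arrive on one channel and watcher errors on another, which is why the discovery logs from both. The skeleton, independent of Prometheus (the watched path is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch a directory; file SD adds the parent directory of each glob.
	if err := watcher.Add("./"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			fmt.Println("event:", event.Name, event.Op)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			fmt.Println("watch error:", err)
		}
	}
}
```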
@@ -231,7 +231,7 @@ func (d *Discovery) watchFiles() { p = "./" } if err := d.watcher.Add(p); err != nil { - level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err) + d.logger.Error("Error adding file watch", "path", p, "err", err) } } } @@ -240,7 +240,7 @@ func (d *Discovery) watchFiles() { func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { - level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + d.logger.Error("Error adding file watcher", "err", err) d.metrics.fileWatcherErrorsCount.Inc() return } @@ -280,7 +280,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { case err := <-d.watcher.Errors: if err != nil { - level.Error(d.logger).Log("msg", "Error watching file", "err", err) + d.logger.Error("Error watching file", "err", err) } } } @@ -300,7 +300,7 @@ func (d *Discovery) deleteTimestamp(filename string) { // stop shuts down the file watcher. func (d *Discovery) stop() { - level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) + d.logger.Debug("Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) done := make(chan struct{}) defer close(done) @@ -320,10 +320,10 @@ func (d *Discovery) stop() { } }() if err := d.watcher.Close(); err != nil { - level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) + d.logger.Error("Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) } - level.Debug(d.logger).Log("msg", "File discovery stopped") + d.logger.Debug("File discovery stopped") } // refresh reads all files matching the discovery's patterns and sends the respective @@ -339,7 +339,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) if err != nil { d.metrics.fileSDReadErrorsCount.Inc() - level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) + d.logger.Error("Error reading file", "path", p, "err", err) // Prevent deletion down below. 
ref[p] = d.lastRefresh[p] continue @@ -356,7 +356,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) for f, n := range d.lastRefresh { m, ok := ref[f] if !ok || n > m { - level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f) + d.logger.Debug("file_sd refresh found file that should be removed", "file", f) d.deleteTimestamp(f) for i := m; i < n; i++ { select { diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go index 179ac5cd1c..46b2ff0262 100644 --- a/discovery/file/file_test.go +++ b/discovery/file/file_test.go @@ -19,6 +19,7 @@ import ( "io" "os" "path/filepath" + "runtime" "sort" "sync" "testing" @@ -319,6 +320,9 @@ func valid2Tg(file string) []*targetgroup.Group { } func TestInitialUpdate(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/16212") + } for _, tc := range []string{ "fixtures/valid.yml", "fixtures/valid.json", @@ -363,6 +367,9 @@ func TestInvalidFile(t *testing.T) { } func TestNoopFileUpdate(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/16212") + } t.Parallel() runner := newTestRunner(t) @@ -381,6 +388,9 @@ func TestNoopFileUpdate(t *testing.T) { } func TestFileUpdate(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/16212") + } t.Parallel() runner := newTestRunner(t) @@ -399,6 +409,9 @@ func TestFileUpdate(t *testing.T) { } func TestInvalidFileUpdate(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/16212") + } t.Parallel() runner := newTestRunner(t) @@ -421,6 +434,9 @@ func TestInvalidFileUpdate(t *testing.T) { } func TestUpdateFileWithPartialWrites(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/16212") + } t.Parallel() runner := newTestRunner(t) diff --git a/discovery/file/metrics.go b/discovery/file/metrics.go index c01501e4ef..3e3df7bbf6 100644 --- a/discovery/file/metrics.go +++ b/discovery/file/metrics.go @@ -30,7 +30,7 @@ type fileMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { fm := &fileMetrics{ fileSDReadErrorsCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index 15f32dd247..32f1bb6722 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "golang.org/x/oauth2/google" @@ -83,7 +83,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
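The file_test.go hunks above guard several fsnotify-dependent tests with the same Windows skip, since the watcher tests are flaky there (the linked issue is in the diff). The shape of the guard, in isolation:

```go
package file

import (
	"runtime"
	"testing"
)

func TestFileUpdate(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/16212")
	}
	t.Parallel()
	// ... exercise the fsnotify-backed watcher as in the hunks above ...
}
```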
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &gceMetrics{ refreshMetrics: rmi, } @@ -129,10 +129,10 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*gceMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index df56f94c5f..88fe09bd3e 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -15,12 +15,12 @@ package hetzner import ( "context" + "log/slog" "net" "net/http" "strconv" "time" - "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -53,14 +53,16 @@ const ( // the Discoverer interface. type hcloudDiscovery struct { *refresh.Discovery - client *hcloud.Client - port int + client *hcloud.Client + port int + labelSelector string } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. -func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { +func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ - port: conf.Port, + port: conf.Port, + labelSelector: conf.LabelSelector, } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd") @@ -79,7 +81,10 @@ func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) } func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - servers, err := d.client.Server.All(ctx) + servers, err := d.client.Server.AllWithOpts(ctx, hcloud.ServerListOpts{ListOpts: hcloud.ListOpts{ + PerPage: 50, + LabelSelector: d.labelSelector, + }}) if err != nil { return nil, err } diff --git a/discovery/hetzner/hcloud_test.go b/discovery/hetzner/hcloud_test.go index 10b799037a..fa8291625a 100644 --- a/discovery/hetzner/hcloud_test.go +++ b/discovery/hetzner/hcloud_test.go @@ -18,8 +18,8 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -43,7 +43,7 @@ func TestHCloudSDRefresh(t *testing.T) { cfg.HTTPClientConfig.BearerToken = hcloudTestToken cfg.hcloudEndpoint = suite.Mock.Endpoint() - d, err := newHcloudDiscovery(&cfg, log.NewNopLogger()) + d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 69c823d382..9245d933cc 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -17,9 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "time" - "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -59,12 +59,15 @@ type SDConfig struct { RefreshInterval model.Duration 
`yaml:"refresh_interval"` Port int `yaml:"port"` Role Role `yaml:"role"` - hcloudEndpoint string // For tests only. - robotEndpoint string // For tests only. + + LabelSelector string `yaml:"label_selector,omitempty"` + + hcloudEndpoint string // For tests only. + robotEndpoint string // For tests only. } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &hetznerMetrics{ refreshMetrics: rmi, } @@ -135,10 +138,10 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*hetznerMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf, logger) @@ -157,7 +160,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere ), nil } -func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { switch conf.Role { case HetznerRoleHcloud: if conf.hcloudEndpoint == "" { diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 64155bfaed..33aa2abcd8 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -18,13 +18,13 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -39,7 +39,7 @@ const ( hetznerLabelRobotCancelled = hetznerRobotLabelPrefix + "cancelled" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() // Discovery periodically performs Hetzner Robot requests. It implements // the Discoverer interface. @@ -51,7 +51,7 @@ type robotDiscovery struct { } // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. 
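The hcloud hunks above switch from `Server.All` to `Server.AllWithOpts` so the new `label_selector` option can be pushed down to the Hetzner API, with an explicit page size of 50. A sketch of the call, assuming hcloud-go v2:

```go
package hetzner

import (
	"context"

	"github.com/hetznercloud/hcloud-go/v2/hcloud"
)

// listServers mirrors the refresh change above: AllWithOpts pages through
// results and filters server-side, so servers that do not match the label
// selector are never transferred at all.
func listServers(ctx context.Context, client *hcloud.Client, labelSelector string) ([]*hcloud.Server, error) {
	return client.Server.AllWithOpts(ctx, hcloud.ServerListOpts{ListOpts: hcloud.ListOpts{
		PerPage:       50,            // explicit page size, as in the hunk
		LabelSelector: labelSelector, // e.g. "env=prod"
	}})
}
```

In scrape configuration this surfaces as the new `label_selector` field on `hetzner_sd_configs`, per the `yaml:"label_selector,omitempty"` tag added above.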
-func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { +func newRobotDiscovery(conf *SDConfig, _ *slog.Logger) (*robotDiscovery, error) { d := &robotDiscovery{ port: conf.Port, endpoint: conf.robotEndpoint, @@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode) } diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index abee5fea90..2618bd097c 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) @@ -42,7 +42,7 @@ func TestRobotSDRefresh(t *testing.T) { cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword} cfg.robotEndpoint = suite.Mock.Endpoint() - d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) + d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) @@ -91,12 +91,11 @@ func TestRobotSDRefreshHandleError(t *testing.T) { cfg := DefaultSDConfig cfg.robotEndpoint = suite.Mock.Endpoint() - d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) + d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger()) require.NoError(t, err) targetGroups, err := d.refresh(context.Background()) - require.Error(t, err) - require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error()) + require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot") require.Empty(t, targetGroups) } diff --git a/discovery/http/http.go b/discovery/http/http.go index ff76fd7627..ebc1c31f61 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -19,17 +19,18 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -40,10 +41,10 @@ import ( var ( // DefaultSDConfig is the default HTTP SD configuration. 
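robot_test.go above also collapses a two-step error assertion into testify's `require.EqualError`, which checks both that the error is non-nil and that its message matches. The consolidation in isolation:

```go
package hetzner

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEqualErrorPattern(t *testing.T) {
	err := errors.New("non 2xx status '401' response during hetzner service discovery with role robot")

	// Before: two assertions that can drift apart.
	require.Error(t, err)
	require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())

	// After: one assertion, and a nil error fails with a clear message.
	require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot")
}
```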
DefaultSDConfig = SDConfig{ - RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, + RefreshInterval: model.Duration(60 * time.Second), } - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) + userAgent = version.PrometheusUserAgent() matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) ) @@ -85,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.URL == "" { - return fmt.Errorf("URL is missing") + return errors.New("URL is missing") } parsedURL, err := url.Parse(c.URL) if err != nil { return err } if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { - return fmt.Errorf("URL scheme must be 'http' or 'https'") + return errors.New("URL scheme must be 'http' or 'https'") } if parsedURL.Host == "" { - return fmt.Errorf("host is missing in URL") + return errors.New("host is missing in URL") } return c.HTTPClientConfig.Validate() } @@ -114,14 +115,14 @@ type Discovery struct { } // NewDiscovery returns a new HTTP discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*httpMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...) diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index 0cafe035dc..3af9e4e504 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -21,11 +21,11 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -49,7 +49,7 @@ func TestHTTPValidRefresh(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -75,7 +75,7 @@ func TestHTTPValidRefresh(t *testing.T) { } func TestHTTPInvalidCode(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) })) @@ -94,7 +94,7 @@ func TestHTTPInvalidCode(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -104,7 +104,7 @@ func TestHTTPInvalidCode(t *testing.T) { } func TestHTTPInvalidFormat(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "{}") 
})) @@ -123,7 +123,7 @@ func TestHTTPInvalidFormat(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) ctx := context.Background() @@ -212,7 +212,7 @@ func TestContentTypeRegex(t *testing.T) { func TestSourceDisappeared(t *testing.T) { var stubResponse string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprintln(w, stubResponse) })) @@ -442,7 +442,7 @@ func TestSourceDisappeared(t *testing.T) { require.NoError(t, metrics.Register()) defer metrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics) require.NoError(t, err) for _, test := range cases { ctx := context.Background() diff --git a/discovery/install/install.go b/discovery/install/install.go index f090076b7f..9c397f9d36 100644 --- a/discovery/install/install.go +++ b/discovery/install/install.go @@ -36,6 +36,7 @@ import ( _ "github.com/prometheus/prometheus/discovery/ovhcloud" // register ovhcloud _ "github.com/prometheus/prometheus/discovery/puppetdb" // register puppetdb _ "github.com/prometheus/prometheus/discovery/scaleway" // register scaleway + _ "github.com/prometheus/prometheus/discovery/stackit" // register stackit _ "github.com/prometheus/prometheus/discovery/triton" // register triton _ "github.com/prometheus/prometheus/discovery/uyuni" // register uyuni _ "github.com/prometheus/prometheus/discovery/vultr" // register vultr diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index c8b4f7f8e5..475e6c30eb 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -15,10 +15,9 @@ package ionos import ( "errors" - "fmt" + "log/slog" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -43,10 +42,10 @@ func init() { type Discovery struct{} // NewDiscovery returns a new refresh.Discovery for IONOS Cloud. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ionosMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if conf.ionosEndpoint == "" { @@ -90,7 +89,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
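Across these files every `fmt.Errorf` carrying a constant message becomes `errors.New`; the formatting machinery is kept only where something is actually interpolated. The distinction in one sketch:

```go
package main

import (
	"errors"
	"fmt"
)

func validateURL(raw string) error {
	if raw == "" {
		// Constant message: errors.New is cheaper and signals that
		// nothing is being formatted.
		return errors.New("URL is missing")
	}
	return nil
}

func checkStatus(code int) error {
	if code/100 != 2 {
		// Dynamic message: fmt.Errorf still earns its keep.
		return fmt.Errorf("non 2xx status '%d' response", code)
	}
	return nil
}

func main() {
	fmt.Println(validateURL(""), checkStatus(500))
}
```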
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ionosMetrics{ refreshMetrics: rmi, } diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go index a850fbbfb4..81bb497277 100644 --- a/discovery/ionos/server.go +++ b/discovery/ionos/server.go @@ -15,14 +15,13 @@ package ionos import ( "context" - "fmt" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" ionoscloud "github.com/ionos-cloud/sdk-go/v6" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -60,7 +59,7 @@ type serverDiscovery struct { datacenterID string } -func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { +func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error) { d := &serverDiscovery{ port: conf.Port, datacenterID: conf.DatacenterID, @@ -77,7 +76,7 @@ func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) Transport: rt, Timeout: time.Duration(conf.RefreshInterval), } - cfg.UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) + cfg.UserAgent = version.PrometheusUserAgent() d.client = ionoscloud.NewAPIClient(cfg) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index c7a60ae6d3..c179779277 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,7 +33,7 @@ import ( // Endpoints discovers new endpoint targets. type Endpoints struct { - logger log.Logger + logger *slog.Logger endpointsInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -49,9 +49,10 @@ type Endpoints struct { } // NewEndpoints returns a new endpoints discovery. -func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { +// Endpoints API is deprecated in k8s v1.33+, but we should still support it. 
+func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) @@ -92,26 +93,23 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err) + l.Error("Error adding endpoints event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } - ep := &apiv1.Endpoints{} - ep.Namespace = svc.Namespace - ep.Name = svc.Name - obj, exists, err := e.endpointsStore.Get(ep) + obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name)) if exists && err == nil { e.enqueue(obj.(*apiv1.Endpoints)) } if err != nil { - level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err) + e.logger.Error("retrieving endpoints failed", "err", err) } } _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -131,7 +129,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, cur interface{}) { @@ -154,7 +152,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -167,12 +165,15 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca e.enqueueNode(node.Name) }, DeleteFunc: func(o interface{}) { - node := o.(*apiv1.Node) - e.enqueueNode(node.Name) + nodeName, err := nodeName(o) + if err != nil { + l.Error("Error getting Node name", "err", err) + } + e.enqueueNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -182,7 +183,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca func (e *Endpoints) enqueueNode(nodeName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -194,7 +195,7 @@ func (e *Endpoints) enqueueNode(nodeName string) { func (e *Endpoints) enqueuePod(podNamespacedName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err) + e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err) return } @@ -223,7 +224,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if 
!cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache") + e.logger.Error("endpoints informer unable to sync cache") } return } @@ -247,13 +248,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointsStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -262,7 +263,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) } eps, err := convertToEndpoints(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err) + e.logger.Error("converting to Endpoints object failed", "err", err) return true } send(ctx, ch, e.buildEndpoints(eps)) @@ -361,16 +362,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { if port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -397,10 +401,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { v := eps.Labels[apiv1.EndpointsOverCapacity] if v == "truncated" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } if v == "warning" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } // For all seen pods, check all container ports. If they were not covered @@ -411,7 +415,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) 
+ for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -428,6 +433,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -435,6 +441,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } @@ -448,13 +455,10 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -464,31 +468,27 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { } func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { - svc := &apiv1.Service{} - svc.Namespace = ns - svc.Name = name - - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving service failed", "err", err) return } if !exists { return } - svc = obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } -func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet { +func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet { if nodeName == nil { return tg } obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName) if err != nil { - level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err) + logger.Error("Error getting node", "node", *nodeName, "err", err) return tg } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index e877657dba..28ad5697bc 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -18,10 +18,12 @@ import ( "testing" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -95,6 +97,7 @@ func makeEndpoints() *v1.Endpoints { } func TestEndpointsDiscoveryBeforeRun(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -149,6 +152,7 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) { } func TestEndpointsDiscoveryAdd(t *testing.T) { + t.Parallel() obj := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", @@ -244,6 
+248,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, { "__address__": "1.2.3.4:9001", @@ -259,6 +264,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -272,6 +278,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { } func TestEndpointsDiscoveryDelete(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -290,6 +297,7 @@ func TestEndpointsDiscoveryDelete(t *testing.T) { } func TestEndpointsDiscoveryUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -361,6 +369,7 @@ func TestEndpointsDiscoveryUpdate(t *testing.T) { } func TestEndpointsDiscoveryEmptySubsets(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -389,6 +398,7 @@ func TestEndpointsDiscoveryEmptySubsets(t *testing.T) { } func TestEndpointsDiscoveryWithService(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -454,6 +464,7 @@ func TestEndpointsDiscoveryWithService(t *testing.T) { } func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -534,6 +545,7 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { } func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { + t.Parallel() metadataConfig := AttachMetadataConfig{Node: true} nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} @@ -607,6 +619,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { } func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { + t.Parallel() nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} node1 := makeNode("foobar", "", "", nodeLabels1, nil) @@ -684,6 +697,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { } func TestEndpointsDiscoveryNamespaces(t *testing.T) { + t.Parallel() epOne := makeEndpoints() epOne.Namespace = "ns1" objs := []runtime.Object{ @@ -821,6 +835,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -834,6 +849,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { } func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() epOne := makeEndpoints() epOne.Namespace = "own-ns" @@ -928,6 +944,7 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { } func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { + t.Parallel() ep := makeEndpoints() ep.Namespace = "ns" @@ -970,9 +987,10 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { }.Run(t) } -// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. 
+// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. // See https://github.com/prometheus/prometheus/issues/11305 for more details. func TestEndpointsDiscoveryUpdatePod(t *testing.T) { + t.Parallel() pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", @@ -1078,6 +1096,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", }, }, Labels: model.LabelSet{ @@ -1089,3 +1108,187 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { }, }.Run(t) } + +func TestEndpointsDiscoverySidecarContainer(t *testing.T) { + t.Parallel() + objs := []runtime.Object{ + &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testsidecar", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + { + Name: "initport", + Port: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + InitContainers: []v1.Container{ + { + Name: "ic1", + Image: "ic1:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 1111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Name: "ic2", + Image: "ic2:latest", + Ports: []v1.ContainerPort{ + { + Name: "initport", + ContainerPort: 9111, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...) 
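The new TestEndpointsDiscoverySidecarContainer being built up here exercises the other recurring change in endpoints.go: init containers are appended to the regular containers when matching endpoint ports, and the new `__meta_kubernetes_pod_container_init` label records which kind matched. A sketch of the lookup, with `containerOf` as a hypothetical helper distilled from the buildEndpoints hunks:

```go
package kubernetes

import v1 "k8s.io/api/core/v1"

// containerOf scans regular and init containers in one pass; the index
// tells them apart, which is how __meta_kubernetes_pod_container_init
// is derived in the hunks above.
func containerOf(pod *v1.Pod, port int32) (c v1.Container, isInit, found bool) {
	containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
	for i, cand := range containers {
		for _, cport := range cand.Ports {
			if cport.ContainerPort == port {
				return cand, i >= len(pod.Spec.Containers), true
			}
		}
	}
	return v1.Container{}, false, false
}
```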
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testsidecar": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "c1:latest", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "false", + }, + { + "__address__": "4.3.2.1:9111", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_endpoint_port_name": "initport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_pod_container_image": "ic2:latest", + "__meta_kubernetes_pod_container_name": "ic2", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "9111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + { + "__address__": "4.3.2.1:1111", + "__meta_kubernetes_pod_container_image": "ic1:latest", + "__meta_kubernetes_pod_container_name": "ic1", + "__meta_kubernetes_pod_container_port_name": "initport", + "__meta_kubernetes_pod_container_port_number": "1111", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_phase": "", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_uid": "deadbeef", + "__meta_kubernetes_pod_container_init": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpoints_name": "testsidecar", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpoints/default/testsidecar", + }, + }, + }.Run(t) +} + +func BenchmarkResolvePodRef(b *testing.B) { + indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil) + e := &Endpoints{ + podStore: indexer, + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + p := e.resolvePodRef(&v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "foo", + }) + require.Nil(b, p) + } +} diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 7a70255c12..625601abc1 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -16,17 +16,15 @@ 
package kubernetes import ( "context" "errors" - "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -34,9 +32,11 @@ import ( "github.com/prometheus/prometheus/util/strutil" ) +const serviceIndex = "service" + // EndpointSlice discovers new endpoint targets. type EndpointSlice struct { - logger log.Logger + logger *slog.Logger endpointSliceInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -52,9 +52,9 @@ type EndpointSlice struct { } // NewEndpointSlice returns a new endpointslice discovery. -func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { +func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) @@ -93,28 +93,24 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err) + l.Error("Error adding endpoint slices event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } - // TODO(brancz): use cache.Indexer to index endpoints by - // disv1beta1.LabelServiceName so this operation doesn't have to - // iterate over all endpoint objects. 
- for _, obj := range e.endpointSliceStore.List() { - esa, err := e.getEndpointSliceAdaptor(obj) - if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) - continue - } - if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name { - e.enqueue(esa.get()) - } + endpointSlices, err := e.endpointSliceInf.GetIndexer().ByIndex(serviceIndex, namespacedName(svc.Namespace, svc.Name)) + if err != nil { + e.logger.Error("getting endpoint slices by service name failed", "err", err) + return + } + + for _, endpointSlice := range endpointSlices { + e.enqueue(endpointSlice) } } _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -132,7 +128,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } if e.withNodeMetadata { @@ -146,12 +142,15 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod e.enqueueNode(node.Name) }, DeleteFunc: func(o interface{}) { - node := o.(*apiv1.Node) - e.enqueueNode(node.Name) + nodeName, err := nodeName(o) + if err != nil { + l.Error("Error getting Node name", "err", err) + } + e.enqueueNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -161,7 +160,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod func (e *EndpointSlice) enqueueNode(nodeName string) { endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -189,7 +188,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) 
{ if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache") + e.logger.Error("endpointslice informer unable to sync cache") } return } @@ -213,13 +212,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointSliceStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -227,29 +226,17 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr return true } - esa, err := e.getEndpointSliceAdaptor(o) - if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) - return true + if es, ok := o.(*v1.EndpointSlice); ok { + send(ctx, ch, e.buildEndpointSlice(*es)) + } else { + e.logger.Error("received unexpected object", "object", o) + return false } - - send(ctx, ch, e.buildEndpointSlice(esa)) return true } -func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAdaptor, error) { - switch endpointSlice := o.(type) { - case *v1.EndpointSlice: - return newEndpointSliceAdaptorFromV1(endpointSlice), nil - case *v1beta1.EndpointSlice: - return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil - default: - return nil, fmt.Errorf("received unexpected object: %v", o) - } -} - -func endpointSliceSource(ep endpointSliceAdaptor) string { - return endpointSliceSourceFromNamespaceAndName(ep.namespace(), ep.name()) +func endpointSliceSource(ep v1.EndpointSlice) string { + return endpointSliceSourceFromNamespaceAndName(ep.Namespace, ep.Name) } func endpointSliceSourceFromNamespaceAndName(namespace, name string) string { @@ -274,95 +261,95 @@ const ( endpointSliceEndpointTopologyLabelPresentPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_present_" ) -func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgroup.Group { +func (e *EndpointSlice) buildEndpointSlice(eps v1.EndpointSlice) *targetgroup.Group { tg := &targetgroup.Group{ Source: endpointSliceSource(eps), } tg.Labels = model.LabelSet{ - namespaceLabel: lv(eps.namespace()), - endpointSliceAddressTypeLabel: lv(eps.addressType()), + namespaceLabel: lv(eps.Namespace), + endpointSliceAddressTypeLabel: lv(string(eps.AddressType)), } - addObjectMetaLabels(tg.Labels, eps.getObjectMeta(), RoleEndpointSlice) + addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpointSlice) e.addServiceLabels(eps, tg) type podEntry struct { pod *apiv1.Pod - servicePorts []endpointSlicePortAdaptor + servicePorts []v1.EndpointPort } seenPods := map[string]*podEntry{} - add := func(addr string, ep endpointSliceEndpointAdaptor, port endpointSlicePortAdaptor) { + add := func(addr string, ep v1.Endpoint, port v1.EndpointPort) { a := addr - if port.port() != nil { - a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.port()), 10)) + if port.Port != nil { + a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.Port), 10)) } target := model.LabelSet{ model.AddressLabel: lv(a), } - if port.name() != nil { - target[endpointSlicePortNameLabel] = lv(*port.name()) + if port.Name != nil { + target[endpointSlicePortNameLabel] = lv(*port.Name) } - if port.protocol() != 
nil { - target[endpointSlicePortProtocolLabel] = lv(*port.protocol()) + if port.Protocol != nil { + target[endpointSlicePortProtocolLabel] = lv(string(*port.Protocol)) } - if port.port() != nil { - target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.port()), 10)) + if port.Port != nil { + target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.Port), 10)) } - if port.appProtocol() != nil { - target[endpointSlicePortAppProtocol] = lv(*port.appProtocol()) + if port.AppProtocol != nil { + target[endpointSlicePortAppProtocol] = lv(*port.AppProtocol) } - if ep.conditions().ready() != nil { - target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.conditions().ready())) + if ep.Conditions.Ready != nil { + target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.Conditions.Ready)) } - if ep.conditions().serving() != nil { - target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.conditions().serving())) + if ep.Conditions.Serving != nil { + target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.Conditions.Serving)) } - if ep.conditions().terminating() != nil { - target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.conditions().terminating())) + if ep.Conditions.Terminating != nil { + target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.Conditions.Terminating)) } - if ep.hostname() != nil { - target[endpointSliceEndpointHostnameLabel] = lv(*ep.hostname()) + if ep.Hostname != nil { + target[endpointSliceEndpointHostnameLabel] = lv(*ep.Hostname) } - if ep.targetRef() != nil { - target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.targetRef().Kind) - target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.targetRef().Name) + if ep.TargetRef != nil { + target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.TargetRef.Kind) + target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.TargetRef.Name) } - if ep.nodename() != nil { - target[endpointSliceEndpointNodenameLabel] = lv(*ep.nodename()) + if ep.NodeName != nil { + target[endpointSliceEndpointNodenameLabel] = lv(*ep.NodeName) } - if ep.zone() != nil { - target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.zone()) + if ep.Zone != nil { + target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.Zone) } - for k, v := range ep.topology() { + for k, v := range ep.DeprecatedTopology { ln := strutil.SanitizeLabelName(k) target[model.LabelName(endpointSliceEndpointTopologyLabelPrefix+ln)] = lv(v) target[model.LabelName(endpointSliceEndpointTopologyLabelPresentPrefix+ln)] = presentValue } if e.withNodeMetadata { - if ep.targetRef() != nil && ep.targetRef().Kind == "Node" { - target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name) + if ep.TargetRef != nil && ep.TargetRef.Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &ep.TargetRef.Name) } else { - target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + target = addNodeLabels(target, e.nodeInf, e.logger, ep.NodeName) } } - pod := e.resolvePodRef(ep.targetRef()) + pod := e.resolvePodRef(ep.TargetRef) if pod == nil { // This target is not a Pod, so don't continue with Pod specific logic. 
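With the v1beta1 adaptor gone (its wholesale deletion follows below), buildEndpointSlice works on discovery/v1 types directly; the price is that most fields on v1.EndpointPort and v1.Endpoint are pointers and need nil guards before dereferencing, exactly as in the hunks above. The shape of the pattern:

```go
package main

import (
	"fmt"
	"net"
	"strconv"

	v1 "k8s.io/api/discovery/v1"
)

// address guards the optional Port field; Name, Protocol and AppProtocol
// on v1.EndpointPort are pointers and get the same treatment.
func address(addr string, port v1.EndpointPort) string {
	if port.Port == nil {
		return addr
	}
	return net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.Port), 10))
}

func main() {
	p := int32(9000)
	fmt.Println(address("10.0.0.1", v1.EndpointPort{Port: &p})) // 10.0.0.1:9000
	fmt.Println(address("10.0.0.1", v1.EndpointPort{}))         // 10.0.0.1
}
```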
tg.Targets = append(tg.Targets, target) @@ -380,19 +367,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { - if port.port() == nil { + if port.Port == nil { continue } - if *port.port() == cport.ContainerPort { - ports := strconv.FormatUint(uint64(*port.port()), 10) + + if *port.Port == cport.ContainerPort { + ports := strconv.FormatUint(uint64(*port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(cport.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -404,9 +395,9 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou tg.Targets = append(tg.Targets, target) } - for _, ep := range eps.endpoints() { - for _, port := range eps.ports() { - for _, addr := range ep.addresses() { + for _, ep := range eps.Endpoints { + for _, port := range eps.Ports { + for _, addr := range ep.Addresses { add(addr, ep, port) } } @@ -420,14 +411,15 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { - if eport.port() == nil { + if eport.Port == nil { continue } - if cport.ContainerPort == *eport.port() { + if cport.ContainerPort == *eport.Port { return true } } @@ -440,6 +432,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -447,6 +440,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } @@ -460,13 +454,10 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -475,29 +466,29 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { return obj.(*apiv1.Pod) } -func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) { +func (e *EndpointSlice) addServiceLabels(esa v1.EndpointSlice, tg 
*targetgroup.Group) { var ( - svc = &apiv1.Service{} found bool + name string ) - svc.Namespace = esa.namespace() + ns := esa.Namespace // Every EndpointSlice object has the Service they belong to in the // kubernetes.io/service-name label. - svc.Name, found = esa.labels()[esa.labelServiceName()] + name, found = esa.Labels[v1.LabelServiceName] if !found { return } - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving service failed", "err", err) return } if !exists { return } - svc = obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go deleted file mode 100644 index edd64fcb32..0000000000 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kubernetes - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions. -type endpointSliceAdaptor interface { - get() interface{} - getObjectMeta() metav1.ObjectMeta - name() string - namespace() string - addressType() string - endpoints() []endpointSliceEndpointAdaptor - ports() []endpointSlicePortAdaptor - labels() map[string]string - labelServiceName() string -} - -type endpointSlicePortAdaptor interface { - name() *string - port() *int32 - protocol() *string - appProtocol() *string -} - -type endpointSliceEndpointAdaptor interface { - addresses() []string - hostname() *string - nodename() *string - zone() *string - conditions() endpointSliceEndpointConditionsAdaptor - targetRef() *corev1.ObjectReference - topology() map[string]string -} - -type endpointSliceEndpointConditionsAdaptor interface { - ready() *bool - serving() *bool - terminating() *bool -} - -// Adaptor for k8s.io/api/discovery/v1. 
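Several hunks (resolvePodRef, addServiceLabels, the Endpoints serviceUpdate) replace the old trick of allocating a throwaway object just so the store could derive its key with a direct GetByKey on a "namespace/name" key; BenchmarkResolvePodRef above exists to show the allocation win. A sketch of both patterns, with `namespacedName` standing in for the small helper the diff assumes:

```go
package kubernetes

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// namespacedName is assumed to join namespace and name the same way
// client-go's MetaNamespaceKeyFunc does for namespaced objects.
func namespacedName(ns, name string) string { return ns + "/" + name }

func lookupPod(store cache.Store, ns, name string) (*apiv1.Pod, bool) {
	// Old: allocate a stub Pod purely to feed the store's key function.
	//   p := &apiv1.Pod{}
	//   p.Namespace, p.Name = ns, name
	//   obj, exists, err := store.Get(p)
	// New: compute the key directly, with no throwaway allocation.
	obj, exists, err := store.GetByKey(namespacedName(ns, name))
	if err != nil || !exists {
		return nil, false
	}
	return obj.(*apiv1.Pod), true
}
```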
diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go
deleted file mode 100644
index edd64fcb32..0000000000
--- a/discovery/kubernetes/endpointslice_adaptor.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kubernetes
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/api/discovery/v1"
-	"k8s.io/api/discovery/v1beta1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions.
-type endpointSliceAdaptor interface {
-	get() interface{}
-	getObjectMeta() metav1.ObjectMeta
-	name() string
-	namespace() string
-	addressType() string
-	endpoints() []endpointSliceEndpointAdaptor
-	ports() []endpointSlicePortAdaptor
-	labels() map[string]string
-	labelServiceName() string
-}
-
-type endpointSlicePortAdaptor interface {
-	name() *string
-	port() *int32
-	protocol() *string
-	appProtocol() *string
-}
-
-type endpointSliceEndpointAdaptor interface {
-	addresses() []string
-	hostname() *string
-	nodename() *string
-	zone() *string
-	conditions() endpointSliceEndpointConditionsAdaptor
-	targetRef() *corev1.ObjectReference
-	topology() map[string]string
-}
-
-type endpointSliceEndpointConditionsAdaptor interface {
-	ready() *bool
-	serving() *bool
-	terminating() *bool
-}
-
-// Adaptor for k8s.io/api/discovery/v1.
-type endpointSliceAdaptorV1 struct {
-	endpointSlice *v1.EndpointSlice
-}
-
-func newEndpointSliceAdaptorFromV1(endpointSlice *v1.EndpointSlice) endpointSliceAdaptor {
-	return &endpointSliceAdaptorV1{endpointSlice: endpointSlice}
-}
-
-func (e *endpointSliceAdaptorV1) get() interface{} {
-	return e.endpointSlice
-}
-
-func (e *endpointSliceAdaptorV1) getObjectMeta() metav1.ObjectMeta {
-	return e.endpointSlice.ObjectMeta
-}
-
-func (e *endpointSliceAdaptorV1) name() string {
-	return e.endpointSlice.ObjectMeta.Name
-}
-
-func (e *endpointSliceAdaptorV1) namespace() string {
-	return e.endpointSlice.ObjectMeta.Namespace
-}
-
-func (e *endpointSliceAdaptorV1) addressType() string {
-	return string(e.endpointSlice.AddressType)
-}
-
-func (e *endpointSliceAdaptorV1) endpoints() []endpointSliceEndpointAdaptor {
-	eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints))
-	for i := 0; i < len(e.endpointSlice.Endpoints); i++ {
-		eps = append(eps, newEndpointSliceEndpointAdaptorFromV1(e.endpointSlice.Endpoints[i]))
-	}
-	return eps
-}
-
-func (e *endpointSliceAdaptorV1) ports() []endpointSlicePortAdaptor {
-	ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports))
-	for i := 0; i < len(e.endpointSlice.Ports); i++ {
-		ports = append(ports, newEndpointSlicePortAdaptorFromV1(e.endpointSlice.Ports[i]))
-	}
-	return ports
-}
-
-func (e *endpointSliceAdaptorV1) labels() map[string]string {
-	return e.endpointSlice.Labels
-}
-
-func (e *endpointSliceAdaptorV1) labelServiceName() string {
-	return v1.LabelServiceName
-}
-
-// Adaptor for k8s.io/api/discovery/v1beta1.
-type endpointSliceAdaptorV1Beta1 struct {
-	endpointSlice *v1beta1.EndpointSlice
-}
-
-func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor {
-	return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice}
-}
-
-func (e *endpointSliceAdaptorV1Beta1) get() interface{} {
-	return e.endpointSlice
-}
-
-func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta {
-	return e.endpointSlice.ObjectMeta
-}
-
-func (e *endpointSliceAdaptorV1Beta1) name() string {
-	return e.endpointSlice.Name
-}
-
-func (e *endpointSliceAdaptorV1Beta1) namespace() string {
-	return e.endpointSlice.Namespace
-}
-
-func (e *endpointSliceAdaptorV1Beta1) addressType() string {
-	return string(e.endpointSlice.AddressType)
-}
-
-func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor {
-	eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints))
-	for i := 0; i < len(e.endpointSlice.Endpoints); i++ {
-		eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i]))
-	}
-	return eps
-}
-
-func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor {
-	ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports))
-	for i := 0; i < len(e.endpointSlice.Ports); i++ {
-		ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i]))
-	}
-	return ports
-}
-
-func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string {
-	return e.endpointSlice.Labels
-}
-
-func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string {
-	return v1beta1.LabelServiceName
-}
-
-type endpointSliceEndpointAdaptorV1 struct {
-	endpoint v1.Endpoint
-}
-
-func newEndpointSliceEndpointAdaptorFromV1(endpoint v1.Endpoint) endpointSliceEndpointAdaptor {
-	return &endpointSliceEndpointAdaptorV1{endpoint: endpoint}
-}
-
-func (e *endpointSliceEndpointAdaptorV1) addresses() []string {
-	return e.endpoint.Addresses
-}
-
-func (e *endpointSliceEndpointAdaptorV1) hostname() *string {
-	return e.endpoint.Hostname
-}
-
-func (e *endpointSliceEndpointAdaptorV1) nodename() *string {
-	return e.endpoint.NodeName
-}
-
-func (e *endpointSliceEndpointAdaptorV1) zone() *string {
-	return e.endpoint.Zone
-}
-
-func (e *endpointSliceEndpointAdaptorV1) conditions() endpointSliceEndpointConditionsAdaptor {
-	return newEndpointSliceEndpointConditionsAdaptorFromV1(e.endpoint.Conditions)
-}
-
-func (e *endpointSliceEndpointAdaptorV1) targetRef() *corev1.ObjectReference {
-	return e.endpoint.TargetRef
-}
-
-func (e *endpointSliceEndpointAdaptorV1) topology() map[string]string {
-	return e.endpoint.DeprecatedTopology
-}
-
-type endpointSliceEndpointConditionsAdaptorV1 struct {
-	endpointConditions v1.EndpointConditions
-}
-
-func newEndpointSliceEndpointConditionsAdaptorFromV1(endpointConditions v1.EndpointConditions) endpointSliceEndpointConditionsAdaptor {
-	return &endpointSliceEndpointConditionsAdaptorV1{endpointConditions: endpointConditions}
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1) ready() *bool {
-	return e.endpointConditions.Ready
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1) serving() *bool {
-	return e.endpointConditions.Serving
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool {
-	return e.endpointConditions.Terminating
-}
-
-type endpointSliceEndpointAdaptorV1beta1 struct {
-	endpoint v1beta1.Endpoint
-}
-
-func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor {
-	return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint}
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string {
-	return e.endpoint.Addresses
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string {
-	return e.endpoint.Hostname
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string {
-	return e.endpoint.NodeName
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string {
-	return nil
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor {
-	return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions)
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference {
-	return e.endpoint.TargetRef
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string {
-	return e.endpoint.Topology
-}
-
-type endpointSliceEndpointConditionsAdaptorV1beta1 struct {
-	endpointConditions v1beta1.EndpointConditions
-}
-
-func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor {
-	return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions}
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool {
-	return e.endpointConditions.Ready
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool {
-	return e.endpointConditions.Serving
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool {
-	return e.endpointConditions.Terminating
-}
-
-type endpointSlicePortAdaptorV1 struct {
-	endpointPort v1.EndpointPort
-}
-
-func newEndpointSlicePortAdaptorFromV1(port v1.EndpointPort) endpointSlicePortAdaptor {
-	return &endpointSlicePortAdaptorV1{endpointPort: port}
-}
-
-func (e *endpointSlicePortAdaptorV1) name() *string {
-	return e.endpointPort.Name
-}
-
-func (e *endpointSlicePortAdaptorV1) port() *int32 {
-	return e.endpointPort.Port
-}
-
-func (e *endpointSlicePortAdaptorV1) protocol() *string {
-	val := string(*e.endpointPort.Protocol)
-	return &val
-}
-
-func (e *endpointSlicePortAdaptorV1) appProtocol() *string {
-	return e.endpointPort.AppProtocol
-}
-
-type endpointSlicePortAdaptorV1beta1 struct {
-	endpointPort v1beta1.EndpointPort
-}
-
-func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor {
-	return &endpointSlicePortAdaptorV1beta1{endpointPort: port}
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) name() *string {
-	return e.endpointPort.Name
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) port() *int32 {
-	return e.endpointPort.Port
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) protocol() *string {
-	val := string(*e.endpointPort.Protocol)
-	return &val
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string {
-	return e.endpointPort.AppProtocol
-}
diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go
deleted file mode 100644
index 1ee3337193..0000000000
--- a/discovery/kubernetes/endpointslice_adaptor_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kubernetes
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	v1 "k8s.io/api/discovery/v1"
-	"k8s.io/api/discovery/v1beta1"
-)
-
-func Test_EndpointSliceAdaptor_v1(t *testing.T) {
-	endpointSlice := makeEndpointSliceV1()
-	adaptor := newEndpointSliceAdaptorFromV1(endpointSlice)
-
-	require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name())
-	require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
-	require.Equal(t, endpointSlice.AddressType, v1.AddressType(adaptor.addressType()))
-	require.Equal(t, endpointSlice.Labels, adaptor.labels())
-	require.Equal(t, "testendpoints", endpointSlice.Labels[v1.LabelServiceName])
-
-	for i, endpointAdaptor := range adaptor.endpoints() {
-		require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
-		require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
-		require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
-		require.Equal(t, endpointSlice.Endpoints[i].DeprecatedTopology, endpointAdaptor.topology())
-	}
-
-	for i, portAdaptor := range adaptor.ports() {
-		require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name())
-		require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port())
-		require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol())
-		require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
-	}
-}
-
-func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
-	endpointSlice := makeEndpointSliceV1beta1()
-	adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice)
-
-	require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name())
-	require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
-	require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType()))
-	require.Equal(t, endpointSlice.Labels, adaptor.labels())
-	require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName])
-
-	for i, endpointAdaptor := range adaptor.endpoints() {
-		require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
-		require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
-		require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
-		require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology())
-	}
-
-	for i, portAdaptor := range adaptor.ports() {
-		require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name())
-		require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port())
-		require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol())
-		require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
-	}
-}
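[Reviewer note - illustrative only, not part of the patch] With the adaptors gone, resolvePodRef and addServiceLabels look objects up via GetByKey using the informer's default "<namespace>/<name>" key; namespacedName is assumed here to be the repo's helper that joins the two with a slash. A sketch against client-go's cache package:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// MetaNamespaceKeyFunc is the key function informer stores use by default.
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	_ = store.Add(&apiv1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "testpod"}})

	// Equivalent of namespacedName(ns, name): the "<namespace>/<name>" key,
	// replacing the old pattern of building a throwaway object for store.Get.
	obj, exists, err := store.GetByKey("default/testpod")
	fmt.Println(obj.(*apiv1.Pod).Name, exists, err) // testpod true <nil>
}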
diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go
index 6ef83081be..9eea9abd7b 100644
--- a/discovery/kubernetes/endpointslice_test.go
+++ b/discovery/kubernetes/endpointslice_test.go
@@ -21,7 +21,6 @@ import (
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/discovery/v1"
-	"k8s.io/api/discovery/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
@@ -114,62 +113,9 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
 	}
 }

-func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
-	return &v1beta1.EndpointSlice{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "testendpoints",
-			Namespace: "default",
-			Labels: map[string]string{
-				v1beta1.LabelServiceName: "testendpoints",
-			},
-			Annotations: map[string]string{
-				"test.annotation": "test",
-			},
-		},
-		AddressType: v1beta1.AddressTypeIPv4,
-		Ports: []v1beta1.EndpointPort{
-			{
-				Name:     strptr("testport"),
-				Port:     int32ptr(9000),
-				Protocol: protocolptr(corev1.ProtocolTCP),
-			},
-		},
-		Endpoints: []v1beta1.Endpoint{
-			{
-				Addresses: []string{"1.2.3.4"},
-				Hostname:  strptr("testendpoint1"),
-			}, {
-				Addresses: []string{"2.3.4.5"},
-				Conditions: v1beta1.EndpointConditions{
-					Ready:       boolptr(true),
-					Serving:     boolptr(true),
-					Terminating: boolptr(false),
-				},
-			}, {
-				Addresses: []string{"3.4.5.6"},
-				Conditions: v1beta1.EndpointConditions{
-					Ready:       boolptr(false),
-					Serving:     boolptr(true),
-					Terminating: boolptr(true),
-				},
-			}, {
-				Addresses: []string{"4.5.6.7"},
-				Conditions: v1beta1.EndpointConditions{
-					Ready:       boolptr(true),
-					Serving:     boolptr(true),
-					Terminating: boolptr(false),
-				},
-				TargetRef: &corev1.ObjectReference{
-					Kind: "Node",
-					Name: "barbaz",
-				},
-			},
-		},
-	}
-}
-
 func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.25.0")
+	t.Parallel()
+	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}})

 	k8sDiscoveryTest{
 		discovery: n,
@@ -249,72 +195,8 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
 	}.Run(t)
 }

-func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "1.20.0")
-
-	k8sDiscoveryTest{
-		discovery: n,
-		beforeRun: func() {
-			obj := makeEndpointSliceV1beta1()
-			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
-		},
-		expectedMaxItems: 1,
-		expectedRes: map[string]*targetgroup.Group{
-			"endpointslice/default/testendpoints": {
-				Targets: []model.LabelSet{
-					{
-						"__address__": "1.2.3.4:9000",
-						"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "2.3.4.5:9000",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "3.4.5.6:9000",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "4.5.6.7:9000",
-						"__meta_kubernetes_endpointslice_address_target_kind": "Node",
-						"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-				},
-				Labels: model.LabelSet{
-					"__meta_kubernetes_endpointslice_address_type": "IPv4",
-					"__meta_kubernetes_namespace": "default",
-					"__meta_kubernetes_endpointslice_name": "testendpoints",
-					"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
-					"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
-					"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
-					"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
-				},
-				Source: "endpointslice/default/testendpoints",
-			},
-		},
-	}.Run(t)
-}
-
 func TestEndpointSliceDiscoveryAdd(t *testing.T) {
+	t.Parallel()
 	obj := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "testpod",
@@ -353,25 +235,25 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
 			PodIP: "1.2.3.4",
 		},
 	}
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.20.0", obj)
+	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj)

 	k8sDiscoveryTest{
 		discovery: n,
 		afterStart: func() {
-			obj := &v1beta1.EndpointSlice{
+			obj := &v1.EndpointSlice{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "testendpoints",
 					Namespace: "default",
 				},
-				AddressType: v1beta1.AddressTypeIPv4,
-				Ports: []v1beta1.EndpointPort{
+				AddressType: v1.AddressTypeIPv4,
+				Ports: []v1.EndpointPort{
 					{
 						Name:     strptr("testport"),
 						Port:     int32ptr(9000),
 						Protocol: protocolptr(corev1.ProtocolTCP),
 					},
 				},
-				Endpoints: []v1beta1.Endpoint{
+				Endpoints: []v1.Endpoint{
 					{
 						Addresses: []string{"4.3.2.1"},
 						TargetRef: &corev1.ObjectReference{
@@ -379,13 +261,13 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
 							Name:      "testpod",
 							Namespace: "default",
 						},
-						Conditions: v1beta1.EndpointConditions{
+						Conditions: v1.EndpointConditions{
 							Ready: boolptr(false),
 						},
 					},
 				},
 			}
-			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
+			c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
 		},
 		expectedMaxItems: 1,
 		expectedRes: map[string]*targetgroup.Group{
@@ -411,6 +293,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
 						"__meta_kubernetes_pod_phase": "",
 						"__meta_kubernetes_pod_ready": "unknown",
 						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
 					},
 					{
 						"__address__": "1.2.3.4:9001",
@@ -426,6 +309,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
 						"__meta_kubernetes_pod_phase": "",
 						"__meta_kubernetes_pod_ready": "unknown",
 						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
 					},
 				},
 				Labels: model.LabelSet{
@@ -440,118 +324,36 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
 }

 func TestEndpointSliceDiscoveryDelete(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
+	t.Parallel()
+	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

 	k8sDiscoveryTest{
 		discovery: n,
 		afterStart: func() {
 			obj := makeEndpointSliceV1()
-			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
+			c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
 		},
 		expectedMaxItems: 2,
 		expectedRes: map[string]*targetgroup.Group{
 			"endpointslice/default/testendpoints": {
 				Source: "endpointslice/default/testendpoints",
-				Targets: []model.LabelSet{
-					{
-						"__address__": "1.2.3.4:9000",
-						"__meta_kubernetes_endpointslice_address_target_kind": "",
-						"__meta_kubernetes_endpointslice_address_target_name": "",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
-						"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
-						"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
-						"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "2.3.4.5:9000",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "3.4.5.6:9000",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "4.5.6.7:9000",
-						"__meta_kubernetes_endpointslice_address_target_kind": "Node",
-						"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-				},
-				Labels: map[model.LabelName]model.LabelValue{
-					"__meta_kubernetes_endpointslice_address_type": "IPv4",
-					"__meta_kubernetes_endpointslice_name": "testendpoints",
-					"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
-					"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
-					"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
-					"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
-					"__meta_kubernetes_namespace": "default",
-				},
 			},
 		},
 	}.Run(t)
 }

 func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
+	t.Parallel()
+	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

 	k8sDiscoveryTest{
 		discovery: n,
 		afterStart: func() {
-			obj := &v1beta1.EndpointSlice{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testendpoints",
-					Namespace: "default",
-				},
-				AddressType: v1beta1.AddressTypeIPv4,
-				Ports: []v1beta1.EndpointPort{
-					{
-						Name:     strptr("testport"),
-						Port:     int32ptr(9000),
-						Protocol: protocolptr(corev1.ProtocolTCP),
-					},
-				},
-				Endpoints: []v1beta1.Endpoint{
-					{
-						Addresses: []string{"1.2.3.4"},
-						Hostname:  strptr("testendpoint1"),
-					}, {
-						Addresses: []string{"2.3.4.5"},
-						Conditions: v1beta1.EndpointConditions{
-							Ready: boolptr(true),
-						},
-					},
-				},
-			}
-			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
+			obj := makeEndpointSliceV1()
+			obj.ObjectMeta.Labels = nil
+			obj.ObjectMeta.Annotations = nil
+			obj.Endpoints = obj.Endpoints[0:2]
+			c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
 		},
 		expectedMaxItems: 2,
 		expectedRes: map[string]*targetgroup.Group{
@@ -586,39 +388,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
 						"__meta_kubernetes_endpointslice_port_name": "testport",
 						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 					},
-					{
-						"__address__": "3.4.5.6:9000",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "4.5.6.7:9000",
-						"__meta_kubernetes_endpointslice_address_target_kind": "Node",
-						"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
 				},
 				Labels: model.LabelSet{
-					"__meta_kubernetes_endpointslice_address_type": "IPv4",
-					"__meta_kubernetes_endpointslice_name": "testendpoints",
-					"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
-					"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
-					"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
-					"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
-					"__meta_kubernetes_namespace": "default",
+					"__meta_kubernetes_endpointslice_address_type": "IPv4",
+					"__meta_kubernetes_endpointslice_name":         "testendpoints",
+					"__meta_kubernetes_namespace":                  "default",
 				},
 			},
 		},
@@ -626,85 +400,19 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
 }

 func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
+	t.Parallel()
+	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

 	k8sDiscoveryTest{
 		discovery: n,
 		afterStart: func() {
-			obj := &v1beta1.EndpointSlice{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testendpoints",
-					Namespace: "default",
-				},
-				AddressType: v1beta1.AddressTypeIPv4,
-				Ports: []v1beta1.EndpointPort{
-					{
-						Name:     strptr("testport"),
-						Port:     int32ptr(9000),
-						Protocol: protocolptr(corev1.ProtocolTCP),
-					},
-				},
-				Endpoints: []v1beta1.Endpoint{},
-			}
-			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
+			obj := makeEndpointSliceV1()
+			obj.Endpoints = []v1.Endpoint{}
+			c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
 		},
 		expectedMaxItems: 2,
 		expectedRes: map[string]*targetgroup.Group{
 			"endpointslice/default/testendpoints": {
-				Targets: []model.LabelSet{
-					{
-						"__address__": "1.2.3.4:9000",
-						"__meta_kubernetes_endpointslice_address_target_kind": "",
-						"__meta_kubernetes_endpointslice_address_target_name": "",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
-						"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
-						"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
-						"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "2.3.4.5:9000",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "3.4.5.6:9000",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-					{
-						"__address__": "4.5.6.7:9000",
-						"__meta_kubernetes_endpointslice_address_target_kind": "Node",
-						"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
-						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
-						"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
-						"__meta_kubernetes_endpointslice_port": "9000",
-						"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-						"__meta_kubernetes_endpointslice_port_name": "testport",
-						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-					},
-				},
 				Labels: model.LabelSet{
 					"__meta_kubernetes_endpointslice_address_type": "IPv4",
 					"__meta_kubernetes_endpointslice_name": "testendpoints",
@@ -721,7 +429,8 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
 }

 func TestEndpointSliceDiscoveryWithService(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
+	t.Parallel()
+	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

 	k8sDiscoveryTest{
 		discovery: n,
@@ -813,7 +522,8 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
 }

 func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
+	t.Parallel()
+	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

 	k8sDiscoveryTest{
 		discovery: n,
@@ -920,6 +630,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 }

 func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
+	t.Parallel()
 	metadataConfig := AttachMetadataConfig{Node: true}
 	nodeLabels1 := map[string]string{"az": "us-east1"}
 	nodeLabels2 := map[string]string{"az": "us-west2"}
@@ -1019,6 +730,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 }

 func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
+	t.Parallel()
 	metadataConfig := AttachMetadataConfig{Node: true}
 	nodeLabels1 := map[string]string{"az": "us-east1"}
 	nodeLabels2 := map[string]string{"az": "us-west2"}
@@ -1124,6 +836,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 }

 func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
+	t.Parallel()
 	epOne := makeEndpointSliceV1()
 	epOne.Namespace = "ns1"
 	objs := []runtime.Object{
@@ -1285,6 +998,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 						"__meta_kubernetes_pod_phase": "",
 						"__meta_kubernetes_pod_ready": "unknown",
 						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
 					},
 				},
 				Labels: model.LabelSet{
@@ -1299,6 +1013,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 }

 func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
+	t.Parallel()
 	epOne := makeEndpointSliceV1()
 	epOne.Namespace = "own-ns"
@@ -1419,6 +1134,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 }

 func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
+	t.Parallel()
 	ep := makeEndpointSliceV1()
 	ep.Namespace = "ns"
@@ -1465,6 +1181,7 @@
 // sets up indexing for the main Kube informer only when needed.
 // See: https://github.com/prometheus/prometheus/pull/13554#discussion_r1490965817
 func TestEndpointSliceInfIndexersCount(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name             string
 		withNodeMetadata bool
@@ -1475,12 +1192,14 @@

 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
 			var (
-				n                    *Discovery
-				mainInfIndexersCount int
+				n *Discovery
+				// service indexer is enabled by default
+				mainInfIndexersCount = 1
 			)
 			if tc.withNodeMetadata {
-				mainInfIndexersCount = 1
+				mainInfIndexersCount++
 				n, _ = makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, AttachMetadataConfig{Node: true})
 			} else {
 				n, _ = makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{})
@@ -1498,3 +1217,166 @@
 		})
 	}
 }
+
+func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) {
+	t.Parallel()
+	objs := []runtime.Object{
+		&v1.EndpointSlice{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "testsidecar",
+				Namespace: "default",
+			},
+			AddressType: v1.AddressTypeIPv4,
+			Ports: []v1.EndpointPort{
+				{
+					Name:     strptr("testport"),
+					Port:     int32ptr(9000),
+					Protocol: protocolptr(corev1.ProtocolTCP),
+				},
+				{
+					Name:     strptr("initport"),
+					Port:     int32ptr(9111),
+					Protocol: protocolptr(corev1.ProtocolTCP),
+				},
+			},
+			Endpoints: []v1.Endpoint{
+				{
+					Addresses: []string{"4.3.2.1"},
+					TargetRef: &corev1.ObjectReference{
+						Kind:      "Pod",
+						Name:      "testpod",
+						Namespace: "default",
+					},
+				},
+			},
+		},
+		&corev1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "testpod",
+				Namespace: "default",
+				UID:       types.UID("deadbeef"),
+			},
+			Spec: corev1.PodSpec{
+				NodeName: "testnode",
+				InitContainers: []corev1.Container{
+					{
+						Name:  "ic1",
+						Image: "ic1:latest",
+						Ports: []corev1.ContainerPort{
+							{
+								Name:          "initport",
+								ContainerPort: 1111,
+								Protocol:      corev1.ProtocolTCP,
+							},
+						},
+					},
+					{
+						Name:  "ic2",
+						Image: "ic2:latest",
+						Ports: []corev1.ContainerPort{
+							{
+								Name:          "initport",
+								ContainerPort: 9111,
+								Protocol:      corev1.ProtocolTCP,
+							},
+						},
+					},
+				},
+				Containers: []corev1.Container{
+					{
+						Name:  "c1",
+						Image: "c1:latest",
+						Ports: []corev1.ContainerPort{
+							{
+								Name:          "mainport",
+								ContainerPort: 9000,
+								Protocol:      corev1.ProtocolTCP,
+							},
+						},
+					},
+				},
+			},
+			Status: corev1.PodStatus{
+				HostIP: "2.3.4.5",
+				PodIP:  "4.3.2.1",
+			},
+		},
+	}

+	n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, objs...)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpointslice/default/testsidecar": {
+				Targets: []model.LabelSet{
+					{
+						"__address__": "4.3.2.1:9000",
+						"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
+						"__meta_kubernetes_endpointslice_address_target_name": "testpod",
+						"__meta_kubernetes_endpointslice_port": "9000",
+						"__meta_kubernetes_endpointslice_port_name": "testport",
+						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+						"__meta_kubernetes_pod_container_image": "c1:latest",
+						"__meta_kubernetes_pod_container_name": "c1",
+						"__meta_kubernetes_pod_container_port_name": "mainport",
+						"__meta_kubernetes_pod_container_port_number": "9000",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
+					},
+					{
+						"__address__": "4.3.2.1:9111",
+						"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
+						"__meta_kubernetes_endpointslice_address_target_name": "testpod",
+						"__meta_kubernetes_endpointslice_port": "9111",
+						"__meta_kubernetes_endpointslice_port_name": "initport",
+						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+						"__meta_kubernetes_pod_container_image": "ic2:latest",
+						"__meta_kubernetes_pod_container_name": "ic2",
+						"__meta_kubernetes_pod_container_port_name": "initport",
+						"__meta_kubernetes_pod_container_port_number": "9111",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "true",
+					},
+					{
+						"__address__": "4.3.2.1:1111",
+						"__meta_kubernetes_pod_container_image": "ic1:latest",
+						"__meta_kubernetes_pod_container_name": "ic1",
+						"__meta_kubernetes_pod_container_port_name": "initport",
+						"__meta_kubernetes_pod_container_port_number": "1111",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "true",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_endpointslice_address_type": "IPv4",
+					"__meta_kubernetes_endpointslice_name":         "testsidecar",
+					"__meta_kubernetes_namespace":                  "default",
+				},
+				Source: "endpointslice/default/testsidecar",
+			},
+		},
+	}.Run(t)
+}
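[Reviewer note - illustrative only, not part of the patch] The sidecar test above expects a third target at 4.3.2.1:1111 even though no EndpointSlice port matches ic1's container port; that target comes from the fallback path in buildEndpointSlice, which synthesizes an address from the pod IP and the declared container port. The derivation is just:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// How the discovery builds __address__ for a declared container port that
	// has no matching endpoint port: pod IP joined with the container port.
	addr := net.JoinHostPort("4.3.2.1", strconv.FormatUint(uint64(1111), 10))
	fmt.Println(addr) // 4.3.2.1:1111
}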
"k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,14 +31,14 @@ import ( // Ingress implements discovery of Kubernetes ingress. type Ingress struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewIngress returns a new ingress discovery. -func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { +func NewIngress(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) @@ -67,7 +65,7 @@ func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err) + l.Error("Error adding ingresses event handler.", "err", err) } return s } @@ -87,7 +85,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(i.logger).Log("msg", "ingress informer unable to sync cache") + i.logger.Error("ingress informer unable to sync cache") } return } @@ -123,23 +121,18 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b return true } - var ia ingressAdaptor - switch ingress := o.(type) { - case *v1.Ingress: - ia = newIngressAdaptorFromV1(ingress) - case *v1beta1.Ingress: - ia = newIngressAdaptorFromV1beta1(ingress) - default: - level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", + if ingress, ok := o.(*v1.Ingress); ok { + send(ctx, ch, i.buildIngress(*ingress)) + } else { + i.logger.Error("converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o)) return true } - send(ctx, ch, i.buildIngress(ia)) return true } -func ingressSource(s ingressAdaptor) string { - return ingressSourceFromNamespaceAndName(s.namespace(), s.name()) +func ingressSource(s v1.Ingress) string { + return ingressSourceFromNamespaceAndName(s.Namespace, s.Name) } func ingressSourceFromNamespaceAndName(namespace, name string) string { @@ -153,15 +146,15 @@ const ( ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" ) -func ingressLabels(ingress ingressAdaptor) model.LabelSet { +func ingressLabels(ingress v1.Ingress) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. 
ls := make(model.LabelSet) - ls[namespaceLabel] = lv(ingress.namespace()) - if cls := ingress.ingressClassName(); cls != nil { + ls[namespaceLabel] = lv(ingress.Namespace) + if cls := ingress.Spec.IngressClassName; cls != nil { ls[ingressClassNameLabel] = lv(*cls) } - addObjectMetaLabels(ls, ingress.getObjectMeta(), RoleIngress) + addObjectMetaLabels(ls, ingress.ObjectMeta, RoleIngress) return ls } @@ -181,19 +174,39 @@ func pathsFromIngressPaths(ingressPaths []string) []string { return paths } -func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group { +func rulePaths(rule v1.IngressRule) []string { + rv := rule.IngressRuleValue + if rv.HTTP == nil { + return nil + } + paths := make([]string, len(rv.HTTP.Paths)) + for n, p := range rv.HTTP.Paths { + paths[n] = p.Path + } + return paths +} + +func tlsHosts(ingressTLS []v1.IngressTLS) []string { + var hosts []string + for _, tls := range ingressTLS { + hosts = append(hosts, tls.Hosts...) + } + return hosts +} + +func (i *Ingress) buildIngress(ingress v1.Ingress) *targetgroup.Group { tg := &targetgroup.Group{ Source: ingressSource(ingress), } tg.Labels = ingressLabels(ingress) - for _, rule := range ingress.rules() { + for _, rule := range ingress.Spec.Rules { scheme := "http" - paths := pathsFromIngressPaths(rule.paths()) + paths := pathsFromIngressPaths(rulePaths(rule)) out: - for _, pattern := range ingress.tlsHosts() { - if matchesHostnamePattern(pattern, rule.host()) { + for _, pattern := range tlsHosts(ingress.Spec.TLS) { + if matchesHostnamePattern(pattern, rule.Host) { scheme = "https" break out } @@ -201,9 +214,9 @@ func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group { for _, path := range paths { tg.Targets = append(tg.Targets, model.LabelSet{ - model.AddressLabel: lv(rule.host()), + model.AddressLabel: lv(rule.Host), ingressSchemeLabel: lv(scheme), - ingressHostLabel: lv(rule.host()), + ingressHostLabel: lv(rule.Host), ingressPathLabel: lv(path), }) } diff --git a/discovery/kubernetes/ingress_adaptor.go b/discovery/kubernetes/ingress_adaptor.go deleted file mode 100644 index d1a7b7f2a2..0000000000 --- a/discovery/kubernetes/ingress_adaptor.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kubernetes - -import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ingressAdaptor is an adaptor for the different Ingress versions. -type ingressAdaptor interface { - getObjectMeta() metav1.ObjectMeta - name() string - namespace() string - labels() map[string]string - annotations() map[string]string - tlsHosts() []string - ingressClassName() *string - rules() []ingressRuleAdaptor -} - -type ingressRuleAdaptor interface { - paths() []string - host() string -} - -// Adaptor for networking.k8s.io/v1. 
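[Reviewer note - illustrative only, not part of the patch] buildIngress above marks a rule's targets as https when any spec.tls host pattern covers the rule's host. The real wildcard matcher, matchesHostnamePattern, lives elsewhere in the package; the stand-in below is a simplified assumption, not the repo's implementation:

package main

import (
	"fmt"
	"strings"
)

// matches is a hypothetical, simplified stand-in for matchesHostnamePattern:
// it treats "*.example.com" as a plain suffix wildcard and anything else as
// an exact hostname.
func matches(pattern, host string) bool {
	if strings.HasPrefix(pattern, "*.") {
		return strings.HasSuffix(host, pattern[1:])
	}
	return pattern == host
}

func main() {
	scheme := "http"
	// ingress.Spec.TLS hosts, flattened the way tlsHosts does.
	for _, pattern := range []string{"*.example.com"} {
		if matches(pattern, "test.example.com") {
			scheme = "https"
			break
		}
	}
	fmt.Println(scheme) // https
}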
diff --git a/discovery/kubernetes/ingress_adaptor.go b/discovery/kubernetes/ingress_adaptor.go
deleted file mode 100644
index d1a7b7f2a2..0000000000
--- a/discovery/kubernetes/ingress_adaptor.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kubernetes
-
-import (
-	v1 "k8s.io/api/networking/v1"
-	"k8s.io/api/networking/v1beta1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// ingressAdaptor is an adaptor for the different Ingress versions.
-type ingressAdaptor interface {
-	getObjectMeta() metav1.ObjectMeta
-	name() string
-	namespace() string
-	labels() map[string]string
-	annotations() map[string]string
-	tlsHosts() []string
-	ingressClassName() *string
-	rules() []ingressRuleAdaptor
-}
-
-type ingressRuleAdaptor interface {
-	paths() []string
-	host() string
-}
-
-// Adaptor for networking.k8s.io/v1.
-type ingressAdaptorV1 struct {
-	ingress *v1.Ingress
-}
-
-func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor {
-	return &ingressAdaptorV1{ingress: ingress}
-}
-
-func (i *ingressAdaptorV1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta }
-func (i *ingressAdaptorV1) name() string { return i.ingress.Name }
-func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace }
-func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels }
-func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations }
-func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }
-
-func (i *ingressAdaptorV1) tlsHosts() []string {
-	var hosts []string
-	for _, tls := range i.ingress.Spec.TLS {
-		hosts = append(hosts, tls.Hosts...)
-	}
-	return hosts
-}
-
-func (i *ingressAdaptorV1) rules() []ingressRuleAdaptor {
-	var rules []ingressRuleAdaptor
-	for _, rule := range i.ingress.Spec.Rules {
-		rules = append(rules, newIngressRuleAdaptorFromV1(rule))
-	}
-	return rules
-}
-
-type ingressRuleAdaptorV1 struct {
-	rule v1.IngressRule
-}
-
-func newIngressRuleAdaptorFromV1(rule v1.IngressRule) ingressRuleAdaptor {
-	return &ingressRuleAdaptorV1{rule: rule}
-}
-
-func (i *ingressRuleAdaptorV1) paths() []string {
-	rv := i.rule.IngressRuleValue
-	if rv.HTTP == nil {
-		return nil
-	}
-	paths := make([]string, len(rv.HTTP.Paths))
-	for n, p := range rv.HTTP.Paths {
-		paths[n] = p.Path
-	}
-	return paths
-}
-
-func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }
-
-// Adaptor for networking.k8s.io/v1beta1.
-type ingressAdaptorV1Beta1 struct {
-	ingress *v1beta1.Ingress
-}
-
-func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor {
-	return &ingressAdaptorV1Beta1{ingress: ingress}
-}
-func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta }
-func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name }
-func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace }
-func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels }
-func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations }
-func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }
-
-func (i *ingressAdaptorV1Beta1) tlsHosts() []string {
-	var hosts []string
-	for _, tls := range i.ingress.Spec.TLS {
-		hosts = append(hosts, tls.Hosts...)
-	}
-	return hosts
-}
-
-func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor {
-	var rules []ingressRuleAdaptor
-	for _, rule := range i.ingress.Spec.Rules {
-		rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule))
-	}
-	return rules
-}
-
-type ingressRuleAdaptorV1Beta1 struct {
-	rule v1beta1.IngressRule
-}
-
-func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor {
-	return &ingressRuleAdaptorV1Beta1{rule: rule}
-}
-
-func (i *ingressRuleAdaptorV1Beta1) paths() []string {
-	rv := i.rule.IngressRuleValue
-	if rv.HTTP == nil {
-		return nil
-	}
-	paths := make([]string, len(rv.HTTP.Paths))
-	for n, p := range rv.HTTP.Paths {
-		paths[n] = p.Path
-	}
-	return paths
-}
-
-func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host }
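[Reviewer note - illustrative only, not part of the patch] Deleting this adaptor is possible because networking.k8s.io/v1 is now the only Ingress API the discovery consumes (v1 has been GA since roughly Kubernetes 1.19), so every accessor collapses into a plain field read:

package main

import (
	"fmt"

	v1 "k8s.io/api/networking/v1"
)

func main() {
	ing := v1.Ingress{}
	ing.Name = "testingress"
	ing.Namespace = "default"
	// Field reads replace the deleted name()/namespace()/ingressClassName() accessors.
	fmt.Println(ing.Namespace+"/"+ing.Name, ing.Spec.IngressClassName == nil)
}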
diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go
index 8e6654c2cc..a828dee27f 100644
--- a/discovery/kubernetes/ingress_test.go
+++ b/discovery/kubernetes/ingress_test.go
@@ -20,7 +20,6 @@ import (
 	"github.com/prometheus/common/model"
 	v1 "k8s.io/api/networking/v1"
-	"k8s.io/api/networking/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -89,60 +88,6 @@ func makeIngress(tls TLSMode) *v1.Ingress {
 	return ret
 }

-func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress {
-	ret := &v1beta1.Ingress{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:        "testingress",
-			Namespace:   "default",
-			Labels:      map[string]string{"test/label": "testvalue"},
-			Annotations: map[string]string{"test/annotation": "testannotationvalue"},
-		},
-		Spec: v1beta1.IngressSpec{
-			IngressClassName: classString("testclass"),
-			TLS:              nil,
-			Rules: []v1beta1.IngressRule{
-				{
-					Host: "example.com",
-					IngressRuleValue: v1beta1.IngressRuleValue{
-						HTTP: &v1beta1.HTTPIngressRuleValue{
-							Paths: []v1beta1.HTTPIngressPath{
-								{Path: "/"},
-								{Path: "/foo"},
-							},
-						},
-					},
-				},
-				{
-					// No backend config, ignored
-					Host: "nobackend.example.com",
-					IngressRuleValue: v1beta1.IngressRuleValue{
-						HTTP: &v1beta1.HTTPIngressRuleValue{},
-					},
-				},
-				{
-					Host: "test.example.com",
-					IngressRuleValue: v1beta1.IngressRuleValue{
-						HTTP: &v1beta1.HTTPIngressRuleValue{
-							Paths: []v1beta1.HTTPIngressPath{{}},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	switch tls {
-	case TLSYes:
-		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
-	case TLSMixed:
-		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}}
-	case TLSWildcard:
-		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}}
-	}
-
-	return ret
-}
-
 func classString(v string) *string {
 	return &v
 }
@@ -199,6 +144,7 @@ func expectedTargetGroups(ns string, tls TLSMode) map[string]*targetgroup.Group
 }

 func TestIngressDiscoveryAdd(t *testing.T) {
+	t.Parallel()
 	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})

 	k8sDiscoveryTest{
@@ -212,21 +158,8 @@ func TestIngressDiscoveryAdd(t *testing.T) {
 	}.Run(t)
 }

-func TestIngressDiscoveryAddV1beta1(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
-
-	k8sDiscoveryTest{
-		discovery: n,
-		afterStart: func() {
-			obj := makeIngressV1beta1(TLSNo)
-			c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
-		},
-		expectedMaxItems: 1,
-		expectedRes:      expectedTargetGroups("default", TLSNo),
-	}.Run(t)
-}
-
 func TestIngressDiscoveryAddTLS(t *testing.T) {
+	t.Parallel()
 	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})

 	k8sDiscoveryTest{
@@ -240,21 +173,8 @@ func TestIngressDiscoveryAddTLS(t *testing.T) {
 	}.Run(t)
 }

-func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
-
-	k8sDiscoveryTest{
-		discovery: n,
-		afterStart: func() {
-			obj := makeIngressV1beta1(TLSYes)
-			c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
-		},
-		expectedMaxItems: 1,
-		expectedRes:      expectedTargetGroups("default", TLSYes),
-	}.Run(t)
-}
-
 func TestIngressDiscoveryAddMixed(t *testing.T) {
+	t.Parallel()
 	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})

 	k8sDiscoveryTest{
@@ -268,21 +188,8 @@ func TestIngressDiscoveryAddMixed(t *testing.T) {
 	}.Run(t)
 }

-func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
-
-	k8sDiscoveryTest{
-		discovery: n,
-		afterStart: func() {
-			obj := makeIngressV1beta1(TLSMixed)
-			c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
-		},
-		expectedMaxItems: 1,
-		expectedRes:      expectedTargetGroups("default", TLSMixed),
-	}.Run(t)
-}
-
 func TestIngressDiscoveryNamespaces(t *testing.T) {
+	t.Parallel()
 	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})

 	expected := expectedTargetGroups("ns1", TLSNo)
@@ -303,28 +210,8 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
 	}.Run(t)
 }

-func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) {
-	n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0")
-
-	expected := expectedTargetGroups("ns1", TLSNo)
-	for k, v := range expectedTargetGroups("ns2", TLSNo) {
-		expected[k] = v
-	}
-	k8sDiscoveryTest{
-		discovery: n,
-		afterStart: func() {
-			for _, ns := range []string{"ns1", "ns2"} {
-				obj := makeIngressV1beta1(TLSNo)
-				obj.Namespace = ns
-				c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
-			}
-		},
-		expectedMaxItems: 2,
-		expectedRes:      expected,
-	}.Run(t)
-}
-
 func TestIngressDiscoveryOwnNamespace(t *testing.T) {
+	t.Parallel()
 	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true})

 	expected := expectedTargetGroups("own-ns", TLSNo)
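[Reviewer note - illustrative only, not part of the patch] kubernetes.go below completes the move from go-kit/log to the standard library's log/slog: constructors take *slog.Logger, level.Error(l).Log("msg", ...) pairs become l.Error(msg, ...), and log.With(l, "role", r) becomes l.With("role", r). A sketch of the equivalences:

package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	l := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Old: level.Error(l).Log("msg", "retrieving service failed", "err", err)
	l.Error("retrieving service failed", "err", errors.New("boom"))

	// Old: log.With(l, "role", "endpointslice")
	eps := l.With("role", "endpointslice")
	eps.Info("informer started")

	// New() falls back to promslog.NewNopLogger() when no logger is passed (per the diff).
	_ = promslog.NewNopLogger()
}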
"k8s.io/client-go/plugin/pkg/client/auth/gcp" // Required to get the GCP auth provider working. "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" - // Required to get the GCP auth provider working. - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" ) const ( @@ -63,14 +57,10 @@ const ( presentValue = model.LabelValue("true") ) -var ( - // Http header. - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - // DefaultSDConfig is the default Kubernetes SD configuration. - DefaultSDConfig = SDConfig{ - HTTPClientConfig: config.DefaultHTTPClientConfig, - } -) +// DefaultSDConfig is the default Kubernetes SD configuration. +var DefaultSDConfig = SDConfig{ + HTTPClientConfig: config.DefaultHTTPClientConfig, +} func init() { discovery.RegisterConfig(&SDConfig{}) @@ -177,7 +167,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.Role == "" { - return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)") + return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)") } err = c.HTTPClientConfig.Validate() if err != nil { @@ -185,20 +175,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } if c.APIServer.URL != nil && c.KubeConfig != "" { // Api-server and kubeconfig_file are mutually exclusive - return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously") + return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously") } if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) { // Kubeconfig_file and custom http config are mutually exclusive - return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'") + return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'") } if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) { - return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly") + return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly") } if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace { - return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously") + return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously") } if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace { - return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously") + return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously") } foundSelectorRoles := make(map[Role]struct{}) @@ -220,18 +210,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if _, ok := allowedSelectors[c.Role]; !ok { return fmt.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, endpointslice, node or ingress", c.Role) } - var allowed bool - for _, role := range allowedSelectors[c.Role] { - if role == string(selector.Role) { - allowed = true - break - } - } - - if !allowed { + if !slices.Contains(allowedSelectors[c.Role], string(selector.Role)) { return fmt.Errorf("%s role supports only %s selectors", c.Role, 
strings.Join(allowedSelectors[c.Role], ", ")) } - _, err := fields.ParseSelector(selector.Field) if err != nil { return err @@ -264,7 +245,7 @@ type Discovery struct { sync.RWMutex client kubernetes.Interface role Role - logger log.Logger + logger *slog.Logger namespaceDiscovery *NamespaceDiscovery discoverers []discovery.Discoverer selectors roleSelector @@ -289,14 +270,14 @@ func (d *Discovery) getNamespaces() []string { } // New creates a new Kubernetes discovery for the given role. -func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { +func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { m, ok := metrics.(*kubernetesMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } var ( kcfg *rest.Config @@ -328,7 +309,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di ownNamespace = string(ownNamespaceContents) } - level.Info(l).Log("msg", "Using pod service account via in-cluster config") + l.Info("Using pod service account via in-cluster config") default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { @@ -340,7 +321,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di } } - kcfg.UserAgent = userAgent + kcfg.UserAgent = version.PrometheusUserAgent() kcfg.ContentType = "application/vnd.kubernetes.protobuf" c, err := kubernetes.NewForConfig(kcfg) @@ -401,55 +382,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { switch d.role { case RoleEndpointSlice: - // Check "networking.k8s.io/v1" availability with retries. 
- // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkDiscoveryV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedIndexInformer - if v1Supported { - e := d.client.DiscoveryV1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) - } else { - e := d.client.DiscoveryV1beta1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{}) + e := d.client.DiscoveryV1().EndpointSlices(namespace) + elw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.Watch(ctx, options) + }, } + informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) s := d.client.CoreV1().Services(namespace) slw := &cache.ListWatch{ @@ -483,7 +431,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go nodeInf.Run(ctx.Done()) } eps := NewEndpointSlice( - log.With(d.logger, "role", "endpointslice"), + d.logger.With("role", "endpointslice"), informer, d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -543,7 +491,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } eps := NewEndpoints( - log.With(d.logger, "role", "endpoint"), + d.logger.With("role", "endpoint"), d.newEndpointsByNodeInformer(elw), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -577,7 +525,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } pod := NewPod( - log.With(d.logger, "role", "pod"), + d.logger.With("role", "pod"), d.newPodsByNodeInformer(plw), nodeInformer, d.metrics.eventCount, @@ -601,7 +549,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } svc := NewService( - log.With(d.logger, 
"role", "service"), + d.logger.With("role", "service"), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.metrics.eventCount, ) @@ -609,57 +557,24 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go svc.informer.Run(ctx.Done()) } case RoleIngress: - // Check "networking.k8s.io/v1" availability with retries. - // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkNetworkingV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedInformer - if v1Supported { - i := d.client.NetworkingV1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) - } else { - i := d.client.NetworkingV1beta1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled) + i := d.client.NetworkingV1().Ingresses(namespace) + ilw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.Watch(ctx, options) + }, } + informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) ingress := NewIngress( - log.With(d.logger, "role", "ingress"), + d.logger.With("role", "ingress"), informer, d.metrics.eventCount, ) @@ -668,11 +583,11 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } case RoleNode: nodeInformer := d.newNodeInformer(ctx) - node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount) + node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount) d.discoverers = append(d.discoverers, node) go node.informer.Run(ctx.Done()) default: - level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role) + d.logger.Error("unknown Kubernetes discovery kind", "role", d.role) } var wg sync.WaitGroup @@ -720,20 +635,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) ( } } -func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, 
err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // networking.k8s.io/v1 is available since Kubernetes v1.19 - // https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md - return semVer.Major() >= 1 && semVer.Minor() >= 19, nil -} - func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { nlw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { @@ -756,7 +657,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde indexers[nodeIndex] = func(obj interface{}) ([]string, error) { pod, ok := obj.(*apiv1.Pod) if !ok { - return nil, fmt.Errorf("object is not a pod") + return nil, errors.New("object is not a pod") } return []string{pod.Spec.NodeName}, nil } @@ -770,7 +671,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share indexers[podIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not endpoints") + return nil, errors.New("object is not endpoints") } var pods []string for _, target := range e.Subsets { @@ -789,7 +690,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share indexers[nodeIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not endpoints") + return nil, errors.New("object is not endpoints") } var nodes []string for _, target := range e.Subsets { @@ -814,41 +715,41 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) + indexers[serviceIndex] = func(obj interface{}) ([]string, error) { + e, ok := obj.(*disv1.EndpointSlice) + if !ok { + return nil, errors.New("object is not an endpointslice") + } + + svcName, exists := e.Labels[disv1.LabelServiceName] + if !exists { + return nil, nil + } + + return []string{namespacedName(e.Namespace, svcName)}, nil + } if !d.attachMetadata.Node { return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers) } indexers[nodeIndex] = func(obj interface{}) ([]string, error) { + e, ok := obj.(*disv1.EndpointSlice) + if !ok { + return nil, errors.New("object is not an endpointslice") + } + var nodes []string - switch e := obj.(type) { - case *disv1.EndpointSlice: - for _, target := range e.Endpoints { - if target.TargetRef != nil { - switch target.TargetRef.Kind { - case "Pod": - if target.NodeName != nil { - nodes = append(nodes, *target.NodeName) - } - case "Node": - nodes = append(nodes, target.TargetRef.Name) + for _, target := range e.Endpoints { + if target.TargetRef != nil { + switch target.TargetRef.Kind { + case "Pod": + if target.NodeName != nil { + nodes = append(nodes, *target.NodeName) } + case "Node": + nodes = append(nodes, target.TargetRef.Name) } } - case *disv1beta1.EndpointSlice: - for _, target := range e.Endpoints { - if target.TargetRef != nil { - switch target.TargetRef.Kind { - case "Pod": - if target.NodeName != nil { - nodes = append(nodes, *target.NodeName) - } - case "Node": - nodes = append(nodes, target.TargetRef.Name) - } - } - } - default: - return nil, fmt.Errorf("object is not an endpointslice") } return nodes, nil @@ -882,21 +783,6 @@ func (d 
*Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb return informer } -func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25. - // discovery.k8s.io/v1 is available since Kubernetes v1.21 - // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 - return semVer.Major() >= 1 && semVer.Minor() >= 21, nil -} - func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name) @@ -916,3 +802,13 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, func namespacedName(namespace, name string) string { return namespace + "/" + name } + +// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone. +// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key. +func nodeName(o interface{}) (string, error) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o) + if err != nil { + return "", err + } + return key, nil +} diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index 552f8a4453..fb53032845 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -20,10 +20,13 @@ import ( "testing" "time" - "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" + apiv1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/version" "k8s.io/apimachinery/pkg/watch" @@ -33,8 +36,6 @@ import ( kubetesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/testutil" @@ -46,7 +47,7 @@ func TestMain(m *testing.M) { // makeDiscovery creates a kubernetes.Discovery instance for testing. func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) { - return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...) + return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...) } // makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing. @@ -71,7 +72,7 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer d := &Discovery{ client: clientset, - logger: log.NewNopLogger(), + logger: promslog.NewNopLogger(), role: role, namespaceDiscovery: &nsDiscovery, ownNamespace: "own-ns", @@ -154,7 +155,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) { // readResultWithTimeout reads all targetgroups from channel with timeout. // It merges targetgroups by source and sends the result to result channel. 
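An aside on the `nodeName` helper added at the end of kubernetes.go above (this sketch is illustrative, not part of the patch): informer delete handlers can receive a `cache.DeletedFinalStateUnknown` tombstone instead of the typed object when a watch was interrupted, and `cache.DeletionHandlingMetaNamespaceKeyFunc` unwraps that tombstone to its stored key. A minimal demonstration of both paths, mirroring the `TestNodeName` case added further down:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Live object: nodes are cluster-scoped, so the meta-namespace key is
	// simply the node name.
	node := &apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(node)
	fmt.Println(key, err) // "foo" <nil>

	// Tombstone: a deletion observed after a missed watch event carries the
	// last known key rather than the object; the key func returns it as-is.
	key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(cache.DeletedFinalStateUnknown{Key: "bar"})
	fmt.Println(key, err) // "bar" <nil>
}

The test-file diff resumes below.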
-func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { +func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { res := make(map[string]*targetgroup.Group) timeout := time.After(stopAfter) Loop: @@ -167,7 +168,7 @@ Loop: } res[tg.Source] = tg } - if len(res) == max { + if len(res) == maxGroups { // Reached max target groups we may get, break fast. break Loop } @@ -175,10 +176,10 @@ Loop: // Because we use queue, an object that is created then // deleted or updated may be processed only once. // So possibly we may skip events, timed out here. - t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max) + t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups) break Loop case <-ctx.Done(): - t.Logf("stopped, got %d (max: %d) items", len(res), max) + t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups) break Loop } } @@ -197,7 +198,7 @@ func requireTargetGroups(t *testing.T, expected, res map[string]*targetgroup.Gro panic(err) } - require.Equal(t, string(b1), string(b2)) + require.JSONEq(t, string(b1), string(b2)) } // marshalTargetGroups serializes a set of target groups to JSON, ignoring the @@ -271,6 +272,7 @@ func (s *Service) hasSynced() bool { } func TestRetryOnError(t *testing.T) { + t.Parallel() for _, successAt := range []int{1, 2, 3} { var called int f := func() error { @@ -285,41 +287,8 @@ func TestRetryOnError(t *testing.T) { } } -func TestCheckNetworkingV1Supported(t *testing.T) { - tests := []struct { - version string - wantSupported bool - wantErr bool - }{ - {version: "v1.18.0", wantSupported: false, wantErr: false}, - {version: "v1.18.1", wantSupported: false, wantErr: false}, - // networking v1 is supported since Kubernetes v1.19 - {version: "v1.19.0", wantSupported: true, wantErr: false}, - {version: "v1.20.0-beta.2", wantSupported: true, wantErr: false}, - // error patterns - {version: "", wantSupported: false, wantErr: true}, - {version: "<>", wantSupported: false, wantErr: true}, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.version, func(t *testing.T) { - clientset := fake.NewSimpleClientset() - fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery) - fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version} - supported, err := checkNetworkingV1Supported(clientset) - - if tc.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - require.Equal(t, tc.wantSupported, supported) - }) - } -} - func TestFailuresCountMetric(t *testing.T) { + t.Parallel() tests := []struct { role Role minFailedWatches int @@ -342,7 +311,7 @@ func TestFailuresCountMetric(t *testing.T) { require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount)) // Simulate an error on watch requests. 
- c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) { + c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(_ kubetesting.Action) (bool, watch.Interface, error) { return true, nil, apierrors.NewUnauthorized("unauthorized") }) @@ -354,3 +323,19 @@ func TestFailuresCountMetric(t *testing.T) { }) } } + +func TestNodeName(t *testing.T) { + t.Parallel() + node := &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + } + name, err := nodeName(node) + require.NoError(t, err) + require.Equal(t, "foo", name) + + name, err = nodeName(cache.DeletedFinalStateUnknown{Key: "bar"}) + require.NoError(t, err) + require.Equal(t, "bar", name) +} diff --git a/discovery/kubernetes/metrics.go b/discovery/kubernetes/metrics.go index fe419bc782..ba3cb1d32a 100644 --- a/discovery/kubernetes/metrics.go +++ b/discovery/kubernetes/metrics.go @@ -28,7 +28,7 @@ type kubernetesMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &kubernetesMetrics{ eventCount: prometheus.NewCounterVec( prometheus.CounterOpts{ diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 74d87e22c4..0e0c5745f2 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -38,16 +38,16 @@ const ( // Node discovers Kubernetes nodes. type Node struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewNode returns a new node discovery. 
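The go-kit/log to log/slog swap repeated through node.go and the other role files follows one mechanical recipe: store a `*slog.Logger`, replace `level.Error(l).Log("msg", m, kvs...)` with `l.Error(m, kvs...)`, `log.With(l, kvs...)` with `l.With(kvs...)`, and `log.NewNopLogger()` with `promslog.NewNopLogger()`. A standalone sketch of the new shape (function name assumed for illustration):

package main

import (
	"errors"
	"log/slog"

	"github.com/prometheus/common/promslog"
)

func newNodeLogger(l *slog.Logger) *slog.Logger {
	if l == nil {
		// promslog.NewNopLogger returns a *slog.Logger that discards records,
		// the drop-in replacement for go-kit's log.NewNopLogger.
		l = promslog.NewNopLogger()
	}
	// log.With(l, "role", "node") becomes a method on the logger itself.
	return l.With("role", "node")
}

func main() {
	l := newNodeLogger(nil)
	// level.Error(l).Log("msg", "...", "err", err) collapses to:
	l.Error("Error adding nodes event handler.", "err", errors.New("boom"))
}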
-func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { +func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) @@ -76,13 +76,13 @@ func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.Coun }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } return n } func (n *Node) enqueue(obj interface{}) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + key, err := nodeName(obj) if err != nil { return } @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(n.logger).Log("msg", "node informer unable to sync cache") + n.logger.Error("node informer unable to sync cache") } return } @@ -133,7 +133,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } node, err := convertToNode(o) if err != nil { - level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err) + n.logger.Error("converting to Node object failed", "err", err) return true } send(ctx, ch, n.buildNode(node)) @@ -181,7 +181,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { addr, addrMap, err := nodeAddress(node) if err != nil { - level.Warn(n.logger).Log("msg", "No node address found", "err", err) + n.logger.Warn("No node address found", "err", err) return nil } addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) diff --git a/discovery/kubernetes/node_test.go b/discovery/kubernetes/node_test.go index bbf7a6b27c..bc17efdc01 100644 --- a/discovery/kubernetes/node_test.go +++ b/discovery/kubernetes/node_test.go @@ -56,6 +56,7 @@ func makeEnumeratedNode(i int) *v1.Node { } func TestNodeDiscoveryBeforeStart(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -95,6 +96,7 @@ func TestNodeDiscoveryBeforeStart(t *testing.T) { } func TestNodeDiscoveryAdd(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -124,6 +126,7 @@ func TestNodeDiscoveryAdd(t *testing.T) { } func TestNodeDiscoveryDelete(t *testing.T) { + t.Parallel() obj := makeEnumeratedNode(0) n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}, obj) @@ -142,6 +145,7 @@ func TestNodeDiscoveryDelete(t *testing.T) { } func TestNodeDiscoveryUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 02990e415f..169c6a78a1 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -17,14 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" @@ -44,14 +44,14 @@ type Pod struct { nodeInf cache.SharedInformer withNodeMetadata bool store cache.Store - logger log.Logger + logger 
*slog.Logger queue *workqueue.Type } // NewPod creates a new pod discovery. -func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { +func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) @@ -81,7 +81,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if p.withNodeMetadata { @@ -95,12 +95,15 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo p.enqueuePodsForNode(node.Name) }, DeleteFunc: func(o interface{}) { - node := o.(*apiv1.Node) - p.enqueuePodsForNode(node.Name) + nodeName, err := nodeName(o) + if err != nil { + l.Error("Error getting Node name", "err", err) + } + p.enqueuePodsForNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } } @@ -127,7 +130,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(p.logger).Log("msg", "pod informer unable to sync cache") + p.logger.Error("pod informer unable to sync cache") } return } @@ -164,7 +167,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } pod, err := convertToPod(o) if err != nil { - level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err) + p.logger.Error("converting to Pod object failed", "err", err) return true } send(ctx, ch, p.buildPod(pod)) @@ -216,7 +219,7 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { podPhaseLabel: lv(string(pod.Status.Phase)), podNodeNameLabel: lv(pod.Spec.NodeName), podHostIPLabel: lv(pod.Status.HostIP), - podUID: lv(string(pod.ObjectMeta.UID)), + podUID: lv(string(pod.UID)), } addObjectMetaLabels(ls, pod.ObjectMeta, RolePod) @@ -246,7 +249,7 @@ func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containe func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { cStatus, err := p.findPodContainerStatus(statuses, containerName) if err != nil { - level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) + p.logger.Debug("cannot find container ID", "err", err) return "" } return cStatus.ContainerID @@ -315,7 +318,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { func (p *Pod) enqueuePodsForNode(nodeName string) { pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(p.logger).Log("msg", "Error getting pods for node", "node", nodeName, "err", err) + p.logger.Error("Error getting pods for node", "node", nodeName, "err", err) return } diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go index 286a1a230d..7a3079a265 100644 --- a/discovery/kubernetes/pod_test.go +++ b/discovery/kubernetes/pod_test.go @@ -239,6 +239,7 @@ func expectedPodTargetGroupsWithNodeMeta(ns, nodeName string, nodeLabels map[str } func TestPodDiscoveryBeforeRun(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) 
k8sDiscoveryTest{ @@ -302,6 +303,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) { } func TestPodDiscoveryInitContainer(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) ns := "default" @@ -329,6 +331,7 @@ func TestPodDiscoveryInitContainer(t *testing.T) { } func TestPodDiscoveryAdd(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -343,6 +346,7 @@ func TestPodDiscoveryAdd(t *testing.T) { } func TestPodDiscoveryDelete(t *testing.T) { + t.Parallel() obj := makePods() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}, obj) @@ -362,6 +366,7 @@ func TestPodDiscoveryDelete(t *testing.T) { } func TestPodDiscoveryUpdate(t *testing.T) { + t.Parallel() obj := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", @@ -403,6 +408,7 @@ func TestPodDiscoveryUpdate(t *testing.T) { } func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) initialPod := makePods() @@ -427,6 +433,7 @@ func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) { } func TestPodDiscoveryNamespaces(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) expected := expectedPodTargetGroups("ns1") @@ -448,6 +455,7 @@ func TestPodDiscoveryNamespaces(t *testing.T) { } func TestPodDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{IncludeOwnNamespace: true}) expected := expectedPodTargetGroups("own-ns") @@ -466,6 +474,7 @@ func TestPodDiscoveryOwnNamespace(t *testing.T) { } func TestPodDiscoveryWithNodeMetadata(t *testing.T) { + t.Parallel() attachMetadata := AttachMetadataConfig{Node: true} n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata) nodeLbls := map[string]string{"l1": "v1"} @@ -485,6 +494,7 @@ func TestPodDiscoveryWithNodeMetadata(t *testing.T) { } func TestPodDiscoveryWithNodeMetadataUpdateNode(t *testing.T) { + t.Parallel() nodeLbls := map[string]string{"l2": "v2"} attachMetadata := AttachMetadataConfig{Node: true} n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata) diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 51204a5a1a..e666497c86 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,16 +33,16 @@ import ( // Service implements discovery of Kubernetes services. type Service struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewService returns a new service discovery. 
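Each test in these files gains `t.Parallel()` as its first statement. Parallel tests are suspended until the sequential tests in the package finish, then run concurrently, which pays off here because every `k8sDiscoveryTest` spins up informers and waits on timeouts. A small sketch of the constraint this imposes (hypothetical test names, not from the patch):

package demo

import "testing"

func TestFast(t *testing.T) {
	t.Parallel() // runs concurrently with TestSlow once sequential tests finish
	// Each parallel test must own its fixtures; the discovery tests satisfy
	// this by calling makeDiscovery per test instead of sharing a client.
}

func TestSlow(t *testing.T) {
	t.Parallel()
}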
-func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { +func NewService(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) @@ -71,7 +71,7 @@ func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } return s } @@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(s.logger).Log("msg", "service informer unable to sync cache") + s.logger.Error("service informer unable to sync cache") } return } @@ -128,7 +128,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b } eps, err := convertToService(o) if err != nil { - level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err) + s.logger.Error("converting to Service object failed", "err", err) return true } send(ctx, ch, s.buildService(eps)) diff --git a/discovery/kubernetes/service_test.go b/discovery/kubernetes/service_test.go index dde3aaea57..8386ef296a 100644 --- a/discovery/kubernetes/service_test.go +++ b/discovery/kubernetes/service_test.go @@ -118,6 +118,7 @@ func makeLoadBalancerService() *v1.Service { } func TestServiceDiscoveryAdd(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -189,6 +190,7 @@ func TestServiceDiscoveryAdd(t *testing.T) { } func TestServiceDiscoveryDelete(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService()) k8sDiscoveryTest{ @@ -207,6 +209,7 @@ func TestServiceDiscoveryDelete(t *testing.T) { } func TestServiceDiscoveryUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService()) k8sDiscoveryTest{ @@ -251,6 +254,7 @@ func TestServiceDiscoveryUpdate(t *testing.T) { } func TestServiceDiscoveryNamespaces(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) k8sDiscoveryTest{ @@ -303,6 +307,7 @@ func TestServiceDiscoveryNamespaces(t *testing.T) { } func TestServiceDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{IncludeOwnNamespace: true}) k8sDiscoveryTest{ @@ -338,6 +343,7 @@ func TestServiceDiscoveryOwnNamespace(t *testing.T) { } func TestServiceDiscoveryAllNamespaces(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}) k8sDiscoveryTest{ diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go deleted file mode 100644 index 6fc61485d1..0000000000 --- a/discovery/legacymanager/manager.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "reflect" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -type poolKey struct { - setName string - provider string -} - -// provider holds a Discoverer instance, its configuration and its subscribers. -type provider struct { - name string - d discovery.Discoverer - subs []string - config interface{} -} - -// NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager { - if logger == nil { - logger = log.NewNopLogger() - } - mgr := &Manager{ - logger: logger, - syncCh: make(chan map[string][]*targetgroup.Group), - targets: make(map[poolKey]map[string]*targetgroup.Group), - discoverCancel: []context.CancelFunc{}, - ctx: ctx, - updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), - registerer: registerer, - sdMetrics: sdMetrics, - } - for _, option := range options { - option(mgr) - } - - // Register the metrics. - // We have to do this after setting all options, so that the name of the Manager is set. - if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil { - mgr.metrics = metrics - } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) - return nil - } - - return mgr -} - -// Name sets the name of the manager. -func Name(n string) func(*Manager) { - return func(m *Manager) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.name = n - } -} - -// Manager maintains a set of discovery providers and sends each update to a map channel. -// Targets are grouped by the target set name. -type Manager struct { - logger log.Logger - name string - mtx sync.RWMutex - ctx context.Context - discoverCancel []context.CancelFunc - - // Some Discoverers(eg. k8s) send only the updates for a given target group - // so we use map[tg.Source]*targetgroup.Group to know which group to update. - targets map[poolKey]map[string]*targetgroup.Group - // providers keeps track of SD providers. - providers []*provider - // The sync channel sends the updates as a map where the key is the job value from the scrape config. - syncCh chan map[string][]*targetgroup.Group - - // How long to wait before sending updates to the channel. The variable - // should only be modified in unit tests. - updatert time.Duration - - // The triggerSend channel signals to the manager that new updates have been received from providers. - triggerSend chan struct{} - - // A registerer for all service discovery metrics. - registerer prometheus.Registerer - - metrics *discovery.Metrics - sdMetrics map[string]discovery.DiscovererMetrics -} - -// Run starts the background processing. 
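The deleted legacy manager configured itself through functional options (`options ...func(*Manager)`, with `Name` as the only shipped option). For readers unfamiliar with the pattern, a reduced sketch under invented types:

package demo

type Manager struct {
	name     string
	updatert int // stand-in for the real time.Duration throttle field
}

// Name is an option: a function that mutates the Manager under construction.
func Name(n string) func(*Manager) {
	return func(m *Manager) { m.name = n }
}

func NewManager(options ...func(*Manager)) *Manager {
	m := &Manager{updatert: 5}
	for _, option := range options {
		option(m)
	}
	// Anything depending on option values (in the real code: metrics labelled
	// by manager name) must run after this loop, which is why the manager
	// registered its metrics only after applying all options.
	return m
}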
-func (m *Manager) Run() error { - go m.sender() - <-m.ctx.Done() - m.cancelDiscoverers() - return m.ctx.Err() -} - -// SyncCh returns a read only channel used by all the clients to receive target updates. -func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { - return m.syncCh -} - -// ApplyConfig removes all running discovery providers and starts new ones using the provided config. -func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - for pk := range m.targets { - if _, ok := cfg[pk.setName]; !ok { - m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName) - } - } - m.cancelDiscoverers() - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil - - failedCount := 0 - for name, scfg := range cfg { - failedCount += m.registerProviders(scfg, name) - m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0) - } - m.metrics.FailedConfigs.Set(float64(failedCount)) - - for _, prov := range m.providers { - m.startProvider(m.ctx, prov) - } - - return nil -} - -// StartCustomProvider is used for sdtool. Only use this if you know what you're doing. -func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) { - p := &provider{ - name: name, - d: worker, - subs: []string{name}, - } - m.providers = append(m.providers, p) - m.startProvider(ctx, p) -} - -func (m *Manager) startProvider(ctx context.Context, p *provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) - ctx, cancel := context.WithCancel(ctx) - updates := make(chan []*targetgroup.Group) - - m.discoverCancel = append(m.discoverCancel, cancel) - - go p.d.Run(ctx, updates) - go m.updater(ctx, p, updates) -} - -func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { - for { - select { - case <-ctx.Done(): - return - case tgs, ok := <-updates: - m.metrics.ReceivedUpdates.Inc() - if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) - return - } - - for _, s := range p.subs { - m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) - } - - select { - case m.triggerSend <- struct{}{}: - default: - } - } - } -} - -func (m *Manager) sender() { - ticker := time.NewTicker(m.updatert) - defer ticker.Stop() - - for { - select { - case <-m.ctx.Done(): - return - case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. - select { - case <-m.triggerSend: - m.metrics.SentUpdates.Inc() - select { - case m.syncCh <- m.allGroups(): - default: - m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") - select { - case m.triggerSend <- struct{}{}: - default: - } - } - default: - } - } - } -} - -func (m *Manager) cancelDiscoverers() { - for _, c := range m.discoverCancel { - c() - } -} - -func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if _, ok := m.targets[poolKey]; !ok { - m.targets[poolKey] = make(map[string]*targetgroup.Group) - } - for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. 
- m.targets[poolKey][tg.Source] = tg - } - } -} - -func (m *Manager) allGroups() map[string][]*targetgroup.Group { - m.mtx.RLock() - defer m.mtx.RUnlock() - - tSets := map[string][]*targetgroup.Group{} - n := map[string]int{} - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) - } - } - for setName, v := range n { - m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) - } - return tSets -} - -// registerProviders returns a number of failed SD config. -func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int { - var ( - failed int - added bool - ) - add := func(cfg discovery.Config) { - for _, p := range m.providers { - if reflect.DeepEqual(cfg, p.config) { - p.subs = append(p.subs, setName) - added = true - return - } - } - typ := cfg.Name() - d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), - Metrics: m.sdMetrics[typ], - }) - if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) - failed++ - return - } - m.providers = append(m.providers, &provider{ - name: fmt.Sprintf("%s/%d", typ, len(m.providers)), - d: d, - config: cfg, - subs: []string{setName}, - }) - added = true - } - for _, cfg := range cfgs { - add(cfg) - } - if !added { - // Add an empty target group to force the refresh of the corresponding - // scrape pool and to notify the receiver that this target set has no - // current targets. - // It can happen because the combined set of SD configurations is empty - // or because we fail to instantiate all the SD configurations. - add(discovery.StaticConfig{{}}) - } - return failed -} - -// StaticProvider holds a list of target groups that never change. -type StaticProvider struct { - TargetGroups []*targetgroup.Group -} - -// Run implements the Worker interface. -func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { - // We still have to consider that the consumer exits right away in which case - // the context will be canceled. - select { - case ch <- sd.TargetGroups: - case <-ctx.Done(): - } - close(ch) -} diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go deleted file mode 100644 index a455a8e341..0000000000 --- a/discovery/legacymanager/manager_test.go +++ /dev/null @@ -1,1186 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
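`registerProviders` in the deleted manager coalesced identical SD configurations: before instantiating a discoverer it compared the new config against every existing provider with `reflect.DeepEqual` and, on a match, merely subscribed the job to that provider (the behaviour `TestIdenticalConfigurationsAreCoalesced` below pins down). A stripped-down sketch of that dedup step, with simplified types:

package demo

import "reflect"

type provider struct {
	config interface{}
	subs   []string
}

// addProvider returns the providers slice with cfg registered for setName,
// reusing an existing provider whose config is deeply equal.
func addProvider(providers []*provider, cfg interface{}, setName string) []*provider {
	for _, p := range providers {
		if reflect.DeepEqual(cfg, p.config) {
			p.subs = append(p.subs, setName) // coalesce: one discoverer, two jobs
			return providers
		}
	}
	return append(providers, &provider{config: cfg, subs: []string{setName}})
}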
- -package legacymanager - -import ( - "context" - "fmt" - "sort" - "strconv" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - client_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/testutil" -) - -func TestMain(m *testing.M) { - testutil.TolerantVerifyLeak(m) -} - -func newTestMetrics(t *testing.T, reg prometheus.Registerer) (*discovery.RefreshMetricsManager, map[string]discovery.DiscovererMetrics) { - refreshMetrics := discovery.NewRefreshMetrics(reg) - sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics) - require.NoError(t, err) - return &refreshMetrics, sdMetrics -} - -// TestTargetUpdatesOrder checks that the target updates are received in the expected order. -func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter - // Final targets array is ordered alphabetically by the name of the discoverer. - // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. - testCases := []struct { - title string - updates map[string][]update - expectedTargets [][]*targetgroup.Group - }{ - { - title: "Single TP no updates", - updates: map[string][]update{ - "tp1": {}, - }, - expectedTargets: nil, - }, - { - title: "Multiple TPs no updates", - updates: map[string][]update{ - "tp1": {}, - "tp2": {}, - "tp3": {}, - }, - expectedTargets: nil, - }, - { - title: "Single TP empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - }, - }, - { - title: "Multiple TPs empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{}, - interval: 200 * time.Millisecond, - }, - }, - "tp3": { - { - targetGroups: []targetgroup.Group{}, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - {}, - {}, - }, - }, - { - title: "Single TP initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", 
- Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - }, - }, - { - title: "Single TP initials followed by empty updates", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - }, - }, - { - title: "Single TP initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 500 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: 
"tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - { - title: "One TP initials arrive after other TP updates.", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 150 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 200 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": 
"3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - - { - title: "Single TP empty update in between", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 30 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 300 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - }, - }, - } - - for i, tc := range testCases { - tc := tc - t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - - var totalUpdatesCount int - for _, up := range tc.updates { - if len(up) > 0 { - totalUpdatesCount += len(up) - } - } - provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount) - - for _, up := range tc.updates { - go newMockDiscoveryProvider(up...).Run(ctx, provUpdates) - } - - for x := 0; x < totalUpdatesCount; x++ { - select { - case <-ctx.Done(): - t.Fatalf("%d: no update arrived within the timeout limit", x) - case tgs := <-provUpdates: - discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) - for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x]) - } - } - } - }) - } -} - -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { - t.Helper() - - // Need to sort by the groups's source as the received order is not guaranteed. 
-	sort.Sort(byGroupSource(got))
-	sort.Sort(byGroupSource(expected))
-
-	require.Equal(t, expected, got)
-}
-
-func staticConfig(addrs ...string) discovery.StaticConfig {
-	var cfg discovery.StaticConfig
-	for i, addr := range addrs {
-		cfg = append(cfg, &targetgroup.Group{
-			Source: strconv.Itoa(i),
-			Targets: []model.LabelSet{
-				{model.AddressLabel: model.LabelValue(addr)},
-			},
-		})
-	}
-	return cfg
-}
-
-func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) {
-	t.Helper()
-	if _, ok := tSets[poolKey]; !ok {
-		t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets)
-	}
-
-	match := false
-	var mergedTargets string
-	for _, targetGroup := range tSets[poolKey] {
-		for _, l := range targetGroup.Targets {
-			mergedTargets = mergedTargets + " " + l.String()
-			if l.String() == label {
-				match = true
-			}
-		}
-	}
-	if match != present {
-		msg := ""
-		if !present {
-			msg = "not"
-		}
-		t.Fatalf("%q should %s be present in Targets labels: %q", label, msg, mergedTargets)
-	}
-}
-
-func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	reg := prometheus.NewRegistry()
-	_, sdMetrics := newTestMetrics(t, reg)
-
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
-	require.NotNil(t, discoveryManager)
-	discoveryManager.updatert = 100 * time.Millisecond
-	go discoveryManager.Run()
-
-	c := map[string]discovery.Configs{
-		"prometheus": {
-			staticConfig("foo:9090", "bar:9090"),
-		},
-	}
-	discoveryManager.ApplyConfig(c)
-
-	<-discoveryManager.SyncCh()
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
-
-	c["prometheus"] = discovery.Configs{
-		staticConfig("foo:9090"),
-	}
-	discoveryManager.ApplyConfig(c)
-
-	<-discoveryManager.SyncCh()
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
-}
-
-func TestDiscovererConfigs(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	reg := prometheus.NewRegistry()
-	_, sdMetrics := newTestMetrics(t, reg)
-
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
-	require.NotNil(t, discoveryManager)
-	discoveryManager.updatert = 100 * time.Millisecond
-	go discoveryManager.Run()
-
-	c := map[string]discovery.Configs{
-		"prometheus": {
-			staticConfig("foo:9090", "bar:9090"),
-			staticConfig("baz:9090"),
-		},
-	}
-	discoveryManager.ApplyConfig(c)
-
-	<-discoveryManager.SyncCh()
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true)
-}
-
-// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
-// removing all targets from the static_configs sends an update with empty targetGroups.
-// This is required to signal the receiver that this target set has no current targets.
-func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	reg := prometheus.NewRegistry()
-	_, sdMetrics := newTestMetrics(t, reg)
-
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
-	require.NotNil(t, discoveryManager)
-	discoveryManager.updatert = 100 * time.Millisecond
-	go discoveryManager.Run()
-
-	c := map[string]discovery.Configs{
-		"prometheus": {
-			staticConfig("foo:9090"),
-		},
-	}
-	discoveryManager.ApplyConfig(c)
-
-	<-discoveryManager.SyncCh()
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
-
-	c["prometheus"] = discovery.Configs{
-		discovery.StaticConfig{{}},
-	}
-	discoveryManager.ApplyConfig(c)
-
-	<-discoveryManager.SyncCh()
-
-	pkey := poolKey{setName: "prometheus", provider: "static/0"}
-	targetGroups, ok := discoveryManager.targets[pkey]
-	if !ok {
-		t.Fatalf("'%v' should be present in target groups", pkey)
-	}
-	group, ok := targetGroups[""]
-	if !ok {
-		t.Fatalf("missing '' key in target groups %v", targetGroups)
-	}
-
-	if len(group.Targets) != 0 {
-		t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets))
-	}
-}
-
-func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	reg := prometheus.NewRegistry()
-	_, sdMetrics := newTestMetrics(t, reg)
-
-	discoveryManager := NewManager(ctx, nil, reg, sdMetrics)
-	require.NotNil(t, discoveryManager)
-	discoveryManager.updatert = 100 * time.Millisecond
-	go discoveryManager.Run()
-
-	c := map[string]discovery.Configs{
-		"prometheus": {
-			staticConfig("foo:9090"),
-		},
-		"prometheus2": {
-			staticConfig("foo:9090"),
-		},
-	}
-	discoveryManager.ApplyConfig(c)
-
-	<-discoveryManager.SyncCh()
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
-	if len(discoveryManager.providers) != 1 {
-		t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
-	}
-}
-
-func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
-	originalConfig := discovery.Configs{
-		staticConfig("foo:9090", "bar:9090", "baz:9090"),
-	}
-	processedConfig := discovery.Configs{
-		staticConfig("foo:9090", "bar:9090", "baz:9090"),
-	}
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	reg := prometheus.NewRegistry()
-	_, sdMetrics := newTestMetrics(t, reg)
-
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
-	require.NotNil(t, discoveryManager)
-	discoveryManager.updatert = 100 * time.Millisecond
-	go discoveryManager.Run()
-
-	cfgs := map[string]discovery.Configs{
-		"prometheus": processedConfig,
-	}
-	discoveryManager.ApplyConfig(cfgs)
-	<-discoveryManager.SyncCh()
-
-	for _, cfg := range cfgs {
-		require.Equal(t, originalConfig, cfg)
-	}
-}
-
-type errorConfig struct{ err error }
-
-func (e errorConfig) Name() string { return "error" }
-func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return nil, e.err
-}
-
-// NewDiscovererMetrics implements discovery.Config.
-func (errorConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
-	return &discovery.NoopDiscovererMetrics{}
-}
-
-func TestGaugeFailedConfigs(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	reg := prometheus.NewRegistry()
-	_, sdMetrics := newTestMetrics(t, reg)
-
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
-	require.NotNil(t, discoveryManager)
-	discoveryManager.updatert = 100 * time.Millisecond
-	go discoveryManager.Run()
-
-	c := map[string]discovery.Configs{
-		"prometheus": {
-			errorConfig{fmt.Errorf("tests error 0")},
-			errorConfig{fmt.Errorf("tests error 1")},
-			errorConfig{fmt.Errorf("tests error 2")},
-		},
-	}
-	discoveryManager.ApplyConfig(c)
-	<-discoveryManager.SyncCh()
-
-	failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
-	if failedCount != 3 {
-		t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
-	}
-
-	c["prometheus"] = discovery.Configs{
-		staticConfig("foo:9090"),
-	}
-	discoveryManager.ApplyConfig(c)
-	<-discoveryManager.SyncCh()
-
-	failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
-	if failedCount != 0 {
-		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
-	}
-}
-
-func TestCoordinationWithReceiver(t *testing.T) {
-	updateDelay := 100 * time.Millisecond
-
-	type expect struct {
-		delay time.Duration
-		tgs   map[string][]*targetgroup.Group
-	}
-
-	testCases := []struct {
-		title     string
-		providers map[string]discovery.Discoverer
-		expected  []expect
-	}{
-		{
-			title: "Receiver should get all updates even when one provider closes its channel",
-			providers: map[string]discovery.Discoverer{
-				"once1": &onceProvider{
-					tgs: []*targetgroup.Group{
-						{
-							Source:  "tg1",
-							Targets: []model.LabelSet{{"__instance__": "1"}},
-						},
-					},
-				},
-				"mock1": newMockDiscoveryProvider(
-					update{
-						interval: 2 * updateDelay,
-						targetGroups: []targetgroup.Group{
-							{
-								Source:  "tg2",
-								Targets: []model.LabelSet{{"__instance__": "2"}},
-							},
-						},
-					},
-				),
-			},
-			expected: []expect{
-				{
-					tgs: map[string][]*targetgroup.Group{
-						"once1": {
-							{
-								Source:  "tg1",
-								Targets: []model.LabelSet{{"__instance__": "1"}},
-							},
-						},
-					},
-				},
-				{
-					tgs: map[string][]*targetgroup.Group{
-						"once1": {
-							{
-								Source:  "tg1",
-								Targets: []model.LabelSet{{"__instance__": "1"}},
-							},
-						},
-						"mock1": {
-							{
-								Source:  "tg2",
-								Targets: []model.LabelSet{{"__instance__": "2"}},
-							},
-						},
-					},
-				},
-			},
-		},
-		{
-			title: "Receiver should get all updates even when the channel is blocked",
-			providers: map[string]discovery.Discoverer{
-				"mock1": newMockDiscoveryProvider(
-					update{
-						targetGroups: []targetgroup.Group{
-							{
-								Source:  "tg1",
-								Targets: []model.LabelSet{{"__instance__": "1"}},
-							},
-						},
-					},
-					update{
-						interval: 4 * updateDelay,
-						targetGroups: []targetgroup.Group{
-							{
-								Source:  "tg2",
-								Targets: []model.LabelSet{{"__instance__": "2"}},
-							},
-						},
-					},
-				),
-			},
-			expected: []expect{
-				{
-					delay: 2 * updateDelay,
-					tgs: map[string][]*targetgroup.Group{
-						"mock1": {
-							{
-								Source:  "tg1",
-								Targets: []model.LabelSet{{"__instance__": "1"}},
-							},
-						},
-					},
-				},
-				{
-					delay: 4 * updateDelay,
-					tgs: map[string][]*targetgroup.Group{
-						"mock1": {
-							{
-								Source:  "tg1",
-								Targets: []model.LabelSet{{"__instance__": "1"}},
-							},
-							{
-								Source:  "tg2",
-								Targets: []model.LabelSet{{"__instance__": "2"}},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	for _, tc := range testCases {
-		tc := tc
-
-		t.Run(tc.title, func(t *testing.T) {
-			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-			defer cancel()
-
-			reg := prometheus.NewRegistry()
-			_, sdMetrics := newTestMetrics(t, reg)
-
-			mgr := NewManager(ctx, nil, reg, sdMetrics)
-			require.NotNil(t, mgr)
-			mgr.updatert = updateDelay
-			go mgr.Run()
-
-			for name, p := range tc.providers {
-				mgr.StartCustomProvider(ctx, name, p)
-			}
-
-			for i, expected := range tc.expected {
-				time.Sleep(expected.delay)
-				select {
-				case <-ctx.Done():
-					t.Fatalf("step %d: no update received in the expected timeframe", i)
-				case tgs, ok := <-mgr.SyncCh():
-					if !ok {
-						t.Fatalf("step %d: discovery manager channel is closed", i)
-					}
-					if len(tgs) != len(expected.tgs) {
-						t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v",
-							i, len(tgs), len(expected.tgs), tgs, expected.tgs)
-					}
-					for k := range expected.tgs {
-						if _, ok := tgs[k]; !ok {
-							t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
-						}
-						assertEqualGroups(t, tgs[k], expected.tgs[k])
-					}
-				}
-			}
-		})
-	}
-}
-
-type update struct {
-	targetGroups []targetgroup.Group
-	interval     time.Duration
-}
-
-type mockdiscoveryProvider struct {
-	updates []update
-}
-
-func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider {
-	tp := mockdiscoveryProvider{
-		updates: updates,
-	}
-	return tp
-}
-
-func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) {
-	for _, u := range tp.updates {
-		if u.interval > 0 {
-			select {
-			case <-ctx.Done():
-				return
-			case <-time.After(u.interval):
-			}
-		}
-		tgs := make([]*targetgroup.Group, len(u.targetGroups))
-		for i := range u.targetGroups {
-			tgs[i] = &u.targetGroups[i]
-		}
-		upCh <- tgs
-	}
-	<-ctx.Done()
-}
-
-// byGroupSource implements sort.Interface so we can sort by the Source field.
-type byGroupSource []*targetgroup.Group
-
-func (a byGroupSource) Len() int           { return len(a) }
-func (a byGroupSource) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source }
-
-// onceProvider sends updates once (if any) and closes the update channel.
-type onceProvider struct {
-	tgs []*targetgroup.Group
-}
-
-func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
-	if len(o.tgs) > 0 {
-		ch <- o.tgs
-	}
-	close(ch)
-}
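Editor's note: the mock providers deleted above all exercise the discovery.Discoverer contract, i.e. Run pushes target-group updates onto a channel until the context is cancelled. A minimal sketch of that contract, assuming only the model and targetgroup imports; the exampleDiscoverer type is hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// exampleDiscoverer sends one group, then blocks until cancellation,
// which is the lifecycle the discovery manager expects.
type exampleDiscoverer struct{}

func (exampleDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
	tgs := []*targetgroup.Group{{
		Source:  "example",
		Targets: []model.LabelSet{{model.AddressLabel: "localhost:9100"}},
	}}
	select {
	case up <- tgs:
	case <-ctx.Done():
		return
	}
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	up := make(chan []*targetgroup.Group)
	go exampleDiscoverer{}.Run(ctx, up)
	fmt.Println((<-up)[0].Source) // "example"
}
```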
diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go
deleted file mode 100644
index 955705394d..0000000000
--- a/discovery/legacymanager/registry.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package legacymanager
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-
-	"gopkg.in/yaml.v2"
-
-	"github.com/prometheus/prometheus/discovery"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
-)
-
-const (
-	configFieldPrefix      = "AUTO_DISCOVERY_"
-	staticConfigsKey       = "static_configs"
-	staticConfigsFieldName = configFieldPrefix + staticConfigsKey
-)
-
-var (
-	configNames      = make(map[string]discovery.Config)
-	configFieldNames = make(map[reflect.Type]string)
-	configFields     []reflect.StructField
-
-	configTypesMu sync.Mutex
-	configTypes   = make(map[reflect.Type]reflect.Type)
-
-	emptyStructType = reflect.TypeOf(struct{}{})
-	configsType     = reflect.TypeOf(discovery.Configs{})
-)
-
-// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
-func RegisterConfig(config discovery.Config) {
-	registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
-}
-
-func init() {
-	// N.B.: static_configs is the only Config type implemented by default.
-	// All other types are registered at init by their implementing packages.
-	elemTyp := reflect.TypeOf(&targetgroup.Group{})
-	registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{})
-}
-
-func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) {
-	name := config.Name()
-	if _, ok := configNames[name]; ok {
-		panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
-	}
-	configNames[name] = config
-
-	fieldName := configFieldPrefix + yamlKey // Field must be exported.
-	configFieldNames[elemType] = fieldName
-
-	// Insert fields in sorted order.
-	i := sort.Search(len(configFields), func(k int) bool {
-		return fieldName < configFields[k].Name
-	})
-	configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
-	copy(configFields[i+1:], configFields[i:])                 // Shift fields to the right.
-	configFields[i] = reflect.StructField{                     // Write new field in place.
-		Name: fieldName,
-		Type: reflect.SliceOf(elemType),
-		Tag:  reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
-	}
-}
-
-func getConfigType(out reflect.Type) reflect.Type {
-	configTypesMu.Lock()
-	defer configTypesMu.Unlock()
-	if typ, ok := configTypes[out]; ok {
-		return typ
-	}
-	// Initial exported fields map one-to-one.
-	var fields []reflect.StructField
-	for i, n := 0, out.NumField(); i < n; i++ {
-		switch field := out.Field(i); {
-		case field.PkgPath == "" && field.Type != configsType:
-			fields = append(fields, field)
-		default:
-			fields = append(fields, reflect.StructField{
-				Name:    "_" + field.Name, // Field must be unexported.
-				PkgPath: out.PkgPath(),
-				Type:    emptyStructType,
-			})
-		}
-	}
-	// Append extra config fields on the end.
-	fields = append(fields, configFields...)
-	typ := reflect.StructOf(fields)
-	configTypes[out] = typ
-	return typ
-}
-
-// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
-// that have a Configs field that should be inlined.
-func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
-	outVal := reflect.ValueOf(out)
-	if outVal.Kind() != reflect.Ptr {
-		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
-	}
-	outVal = outVal.Elem()
-	if outVal.Kind() != reflect.Struct {
-		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
-	}
-	outTyp := outVal.Type()
-
-	cfgTyp := getConfigType(outTyp)
-	cfgPtr := reflect.New(cfgTyp)
-	cfgVal := cfgPtr.Elem()
-
-	// Copy shared fields (defaults) to dynamic value.
-	var configs *discovery.Configs
-	for i, n := 0, outVal.NumField(); i < n; i++ {
-		if outTyp.Field(i).Type == configsType {
-			configs = outVal.Field(i).Addr().Interface().(*discovery.Configs)
-			continue
-		}
-		if cfgTyp.Field(i).PkgPath != "" {
-			continue // Field is unexported: ignore.
-		}
-		cfgVal.Field(i).Set(outVal.Field(i))
-	}
-	if configs == nil {
-		return fmt.Errorf("discovery: Configs field not found in type: %T", out)
-	}
-
-	// Unmarshal into dynamic value.
-	if err := unmarshal(cfgPtr.Interface()); err != nil {
-		return replaceYAMLTypeError(err, cfgTyp, outTyp)
-	}
-
-	// Copy shared fields from dynamic value.
-	for i, n := 0, outVal.NumField(); i < n; i++ {
-		if cfgTyp.Field(i).PkgPath != "" {
-			continue // Field is unexported: ignore.
-		}
-		outVal.Field(i).Set(cfgVal.Field(i))
-	}
-
-	var err error
-	*configs, err = readConfigs(cfgVal, outVal.NumField())
-	return err
-}
-
-func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) {
-	var (
-		configs discovery.Configs
-		targets []*targetgroup.Group
-	)
-	for i, n := startField, structVal.NumField(); i < n; i++ {
-		field := structVal.Field(i)
-		if field.Kind() != reflect.Slice {
-			panic("discovery: internal error: field is not a slice")
-		}
-		for k := 0; k < field.Len(); k++ {
-			val := field.Index(k)
-			if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
-				key := configFieldNames[field.Type().Elem()]
-				key = strings.TrimPrefix(key, configFieldPrefix)
-				return nil, fmt.Errorf("empty or null section in %s", key)
-			}
-			switch c := val.Interface().(type) {
-			case *targetgroup.Group:
-				// Add index to the static config target groups for unique identification
-				// within scrape pool.
-				c.Source = strconv.Itoa(len(targets))
-				// Coalesce multiple static configs into a single static config.
-				targets = append(targets, c)
-			case discovery.Config:
-				configs = append(configs, c)
-			default:
-				panic("discovery: internal error: slice element is not a Config")
-			}
-		}
-	}
-	if len(targets) > 0 {
-		configs = append(configs, discovery.StaticConfig(targets))
-	}
-	return configs, nil
-}
-
-// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
-// that have a Configs field that should be inlined.
-func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
-	inVal := reflect.ValueOf(in)
-	for inVal.Kind() == reflect.Ptr {
-		inVal = inVal.Elem()
-	}
-	inTyp := inVal.Type()
-
-	cfgTyp := getConfigType(inTyp)
-	cfgPtr := reflect.New(cfgTyp)
-	cfgVal := cfgPtr.Elem()
-
-	// Copy shared fields to dynamic value.
-	var configs *discovery.Configs
-	for i, n := 0, inTyp.NumField(); i < n; i++ {
-		if inTyp.Field(i).Type == configsType {
-			configs = inVal.Field(i).Addr().Interface().(*discovery.Configs)
-		}
-		if cfgTyp.Field(i).PkgPath != "" {
-			continue // Field is unexported: ignore.
-		}
-		cfgVal.Field(i).Set(inVal.Field(i))
-	}
-	if configs == nil {
-		return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
-	}
-
-	if err := writeConfigs(cfgVal, *configs); err != nil {
-		return nil, err
-	}
-
-	return cfgPtr.Interface(), nil
-}
-
-func writeConfigs(structVal reflect.Value, configs discovery.Configs) error {
-	targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
-	for _, c := range configs {
-		if sc, ok := c.(discovery.StaticConfig); ok {
-			*targets = append(*targets, sc...)
-			continue
-		}
-		fieldName, ok := configFieldNames[reflect.TypeOf(c)]
-		if !ok {
-			return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
-		}
-		field := structVal.FieldByName(fieldName)
-		field.Set(reflect.Append(field, reflect.ValueOf(c)))
-	}
-	return nil
-}
-
-func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
-	var e *yaml.TypeError
-	if errors.As(err, &e) {
-		oldStr := oldTyp.String()
-		newStr := newTyp.String()
-		for i, s := range e.Errors {
-			e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
-		}
-	}
-	return err
-}
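Editor's note: the deleted registry above duplicated the reflection-based inline-config machinery that still lives in the discovery package. A sketch of how that machinery is consumed, assuming the identically named helper discovery.UnmarshalYAMLWithInlineConfigs; the ExampleJobConfig type is hypothetical and mirrors how config.ScrapeConfig embeds an inline Configs field:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/discovery"
)

// ExampleJobConfig carries an inline discovery.Configs field that the
// reflection helper fills from <name>_sd_configs keys; static_configs is
// registered by default, as the deleted init() above notes.
type ExampleJobConfig struct {
	JobName                 string            `yaml:"job_name"`
	ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
}

func (c *ExampleJobConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	return discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal)
}

func main() {
	data := `
job_name: example
static_configs:
- targets: ['localhost:9090']
`
	var cfg ExampleJobConfig
	if err := yaml.Unmarshal([]byte(data), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.JobName, cfg.ServiceDiscoveryConfigs[0].Name()) // example static
}
```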
diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go
index 634a6b1d4b..033025f840 100644
--- a/discovery/linode/linode.go
+++ b/discovery/linode/linode.go
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/linode/linodego"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -138,10 +138,10 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*linodeMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 
 	d := &Discovery{
@@ -165,7 +165,7 @@
 			Timeout: time.Duration(conf.RefreshInterval),
 		},
 	)
-	client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version))
+	client.SetUserAgent(version.PrometheusUserAgent())
 	d.client = &client
 
 	d.Discovery = refresh.NewDiscovery(
@@ -194,13 +194,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	events, err := d.client.ListEvents(ctx, &eventsOpts)
 	if err != nil {
 		var e *linodego.Error
-		if errors.As(err, &e) && e.Code == http.StatusUnauthorized {
-			// If we get a 401, the token doesn't have `events:read_only` scope.
-			// Disable event polling and fallback to doing a full refresh every interval.
-			d.eventPollingEnabled = false
-		} else {
+		if !errors.As(err, &e) || e.Code != http.StatusUnauthorized {
 			return nil, err
 		}
+		// If we get a 401, the token doesn't have `events:read_only` scope.
+		// Disable event polling and fallback to doing a full refresh every interval.
+		d.eventPollingEnabled = false
 	} else {
 		// Event polling tells us changes the Linode API is aware of. Actions issued outside of the Linode API,
 		// such as issuing a `shutdown` at the VM's console instead of using the API to power off an instance,
diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go
index 3c10650653..7bcaa05ba4 100644
--- a/discovery/linode/linode_test.go
+++ b/discovery/linode/linode_test.go
@@ -19,10 +19,10 @@ import (
 	"net/url"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -238,7 +238,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 			defer metrics.Unregister()
 			defer refreshMetrics.Unregister()
 
-			d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+			d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 			require.NoError(t, err)
 			endpoint, err := url.Parse(sdmock.Endpoint())
 			require.NoError(t, err)
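Editor's note: the go-kit/log to log/slog migration above repeats across every SD package in this diff. A minimal sketch of the new pattern, assuming only the prometheus/common/promslog package; the logger values are illustrative:

```go
package main

import "github.com/prometheus/common/promslog"

func main() {
	// promslog.New returns a *slog.Logger, so discovery code calls
	// logger.Debug/Info/Error directly instead of level.Debug(logger).Log(...).
	logger := promslog.New(&promslog.Config{})
	logger.Info("starting discovery", "mechanism", "linode")

	// Scoped loggers replace log.With(logger, ...).
	scoped := logger.With("discovery", "linode", "config", "example")
	scoped.Debug("refreshed targets")

	// Tests swap log.NewNopLogger() for promslog.NewNopLogger().
	_ = promslog.NewNopLogger()
}
```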
diff --git a/discovery/manager.go b/discovery/manager.go
index 897d7d151c..24950d9d59 100644
--- a/discovery/manager.go
+++ b/discovery/manager.go
@@ -16,14 +16,14 @@ package discovery
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"reflect"
 	"sync"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -57,6 +57,8 @@ func (p *Provider) Discoverer() Discoverer {
 
 // IsStarted return true if Discoverer is started.
 func (p *Provider) IsStarted() bool {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
 	return p.cancel != nil
 }
 
@@ -64,7 +66,7 @@ func (p *Provider) Config() interface{} {
 	return p.config
 }
 
-// Registers the metrics needed for SD mechanisms.
+// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms.
 // Does not register the metrics for the Discovery Manager.
 // TODO(ptodev): Add ability to unregister the metrics?
 func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) {
@@ -81,9 +83,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere
 }
 
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	mgr := &Manager{
 		logger:         logger,
@@ -101,12 +103,12 @@ func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Re
 
 	// Register the metrics.
 	// We have to do this after setting all options, so that the name of the Manager is set.
-	if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
-		mgr.metrics = metrics
-	} else {
-		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
+	metrics, err := NewManagerMetrics(registerer, mgr.name)
+	if err != nil {
+		logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
 		return nil
 	}
+	mgr.metrics = metrics
 
 	return mgr
 }
@@ -141,7 +143,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
 // Manager maintains a set of discovery providers and sends each update to a map channel.
 // Targets are grouped by the target set name.
 type Manager struct {
-	logger         log.Logger
+	logger         *slog.Logger
 	name           string
 	httpOpts       []config.HTTPClientOption
 	mtx            sync.RWMutex
@@ -212,29 +214,33 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
 	m.metrics.FailedConfigs.Set(float64(failedCount))
 
 	var (
-		wg sync.WaitGroup
-		// keep shows if we keep any providers after reload.
-		keep         bool
+		wg           sync.WaitGroup
 		newProviders []*Provider
 	)
 	for _, prov := range m.providers {
-		// Cancel obsolete providers.
-		if len(prov.newSubs) == 0 {
+		// Cancel obsolete providers if it has no new subs and it has a cancel function.
+		// prov.cancel != nil is the same check as we use in IsStarted() method but we don't call IsStarted
+		// here because it would take a lock and we need the same lock ourselves for other reads.
+		prov.mu.RLock()
+		if len(prov.newSubs) == 0 && prov.cancel != nil {
 			wg.Add(1)
 			prov.done = func() {
 				wg.Done()
 			}
+			prov.cancel()
+			prov.mu.RUnlock()
 			continue
 		}
+		prov.mu.RUnlock()
+
 		newProviders = append(newProviders, prov)
-		// refTargets keeps reference targets used to populate new subs' targets
+		// refTargets keeps reference targets used to populate new subs' targets as they should be the same.
 		var refTargets map[string]*targetgroup.Group
 		prov.mu.Lock()
 		m.targetsMtx.Lock()
 		for s := range prov.subs {
-			keep = true
 			refTargets = m.targets[poolKey{s, prov.name}]
 			// Remove obsolete subs' targets.
 			if _, ok := prov.newSubs[s]; !ok {
@@ -267,7 +273,9 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
 	// While startProvider does pull the trigger, it may take some time to do so, therefore
 	// we pull the trigger as soon as possible so that downstream managers can populate their state.
 	// See https://github.com/prometheus/prometheus/pull/8639 for details.
-	if keep {
+	// This also helps making the downstream managers drop stale targets as soon as possible.
+	// See https://github.com/prometheus/prometheus/pull/13147 for details.
+	if len(m.providers) > 0 {
 		select {
 		case m.triggerSend <- struct{}{}:
 		default:
@@ -288,16 +296,20 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D
 			name: {},
 		},
 	}
+	m.mtx.Lock()
 	m.providers = append(m.providers, p)
+	m.mtx.Unlock()
 	m.startProvider(ctx, p)
 }
 
 func (m *Manager) startProvider(ctx context.Context, p *Provider) {
-	level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
+	m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
 	ctx, cancel := context.WithCancel(ctx)
 	updates := make(chan []*targetgroup.Group)
 
+	p.mu.Lock()
 	p.cancel = cancel
+	p.mu.Unlock()
 
 	go p.d.Run(ctx, updates)
 	go m.updater(ctx, p, updates)
@@ -305,16 +317,20 @@ func (m *Manager) startProvider(ctx context.Context, p *Provider) {
 
 // cleaner cleans resources associated with provider.
 func (m *Manager) cleaner(p *Provider) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
 	m.targetsMtx.Lock()
-	p.mu.RLock()
 	for s := range p.subs {
 		delete(m.targets, poolKey{s, p.name})
 	}
-	p.mu.RUnlock()
 	m.targetsMtx.Unlock()
 	if p.done != nil {
 		p.done()
 	}
+
+	// Provider was cleaned so mark is as down.
+	p.cancel = nil
 }
 
 func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targetgroup.Group) {
@@ -327,7 +343,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ
 		case tgs, ok := <-updates:
 			m.metrics.ReceivedUpdates.Inc()
 			if !ok {
-				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
+				m.logger.Debug("Discoverer channel closed", "provider", p.name)
 				// Wait for provider cancellation to ensure targets are cleaned up when expected.
 				<-ctx.Done()
 				return
@@ -363,7 +379,7 @@ func (m *Manager) sender() {
 				case m.syncCh <- m.allGroups():
 				default:
 					m.metrics.DelayedUpdates.Inc()
-					level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
+					m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle")
 					select {
 					case m.triggerSend <- struct{}{}:
 					default:
@@ -379,9 +395,11 @@ func (m *Manager) cancelDiscoverers() {
 	m.mtx.RLock()
 	defer m.mtx.RUnlock()
 	for _, p := range m.providers {
+		p.mu.RLock()
 		if p.cancel != nil {
 			p.cancel()
 		}
+		p.mu.RUnlock()
 	}
 }
 
@@ -393,8 +411,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
 		m.targets[poolKey] = make(map[string]*targetgroup.Group)
 	}
 	for _, tg := range tgs {
-		if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics.
+		// Some Discoverers send nil target group so need to check for it to avoid panics.
+		if tg == nil {
+			continue
+		}
+		if len(tg.Targets) > 0 {
 			m.targets[poolKey][tg.Source] = tg
+		} else {
+			// The target group is empty, drop the corresponding entry to avoid leaks.
+			// In case the group yielded targets before, allGroups() will take care of making consumers drop them.
+			delete(m.targets[poolKey], tg.Source)
 		}
 	}
 }
@@ -403,19 +429,33 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
 	tSets := map[string][]*targetgroup.Group{}
 	n := map[string]int{}
 
-	m.targetsMtx.Lock()
-	defer m.targetsMtx.Unlock()
-	for pkey, tsets := range m.targets {
-		for _, tg := range tsets {
-			// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
-			// to signal that it needs to stop all scrape loops for this target set.
-			tSets[pkey.setName] = append(tSets[pkey.setName], tg)
-			n[pkey.setName] += len(tg.Targets)
+	m.mtx.RLock()
+	for _, p := range m.providers {
+		p.mu.RLock()
+		m.targetsMtx.Lock()
+		for s := range p.subs {
+			// Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers.
+			// See: https://github.com/prometheus/prometheus/issues/12858 for details.
+			if _, ok := tSets[s]; !ok {
+				tSets[s] = []*targetgroup.Group{}
+				n[s] = 0
+			}
+			if tsets, ok := m.targets[poolKey{s, p.name}]; ok {
+				for _, tg := range tsets {
+					tSets[s] = append(tSets[s], tg)
+					n[s] += len(tg.Targets)
+				}
+			}
 		}
+		m.targetsMtx.Unlock()
+		p.mu.RUnlock()
 	}
+	m.mtx.RUnlock()
+
 	for setName, v := range n {
 		m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
 	}
+
 	return tSets
 }
 
@@ -435,12 +475,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(DiscovererOptions{
-			Logger:            log.With(m.logger, "discovery", typ, "config", setName),
+			Logger:            m.logger.With("discovery", typ, "config", setName),
 			HTTPClientOptions: m.httpOpts,
 			Metrics:           m.sdMetrics[typ],
 		})
 		if err != nil {
-			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
+			m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName)
 			failed++
 			return
 		}
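Editor's note: the allGroups() rewrite above changes the sync contract — a set name now always appears in the update, and an empty list means "drop everything you had for this set". A consumer-side sketch of that semantics, under the assumption that the consumer keeps its own state map; applyUpdate is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// applyUpdate folds one SyncCh update into local state. An empty list for a
// set name signals that stale targets must be dropped (see issue 12858 above).
func applyUpdate(state, update map[string][]*targetgroup.Group) {
	for setName, groups := range update {
		if len(groups) == 0 {
			delete(state, setName)
			continue
		}
		state[setName] = groups
	}
}

func main() {
	state := map[string][]*targetgroup.Group{}
	applyUpdate(state, map[string][]*targetgroup.Group{"prometheus": {{Source: "0"}}})
	applyUpdate(state, map[string][]*targetgroup.Group{"prometheus": {}})
	fmt.Println(len(state)) // 0: the stale target group was dropped.
}
```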
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index be07edbdb4..38a93be9f4 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -15,6 +15,7 @@ package discovery
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sort"
 	"strconv"
@@ -22,10 +23,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -675,7 +676,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 			reg := prometheus.NewRegistry()
 			_, sdMetrics := NewTestMetrics(t, reg)
 
-			discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+			discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 			require.NotNil(t, discoveryManager)
 			discoveryManager.updatert = 100 * time.Millisecond
 
@@ -694,7 +695,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 			for x := 0; x < totalUpdatesCount; x++ {
 				select {
 				case <-ctx.Done():
-					require.FailNow(t, "%d: no update arrived within the timeout limit", x)
+					t.Fatalf("%d: no update arrived within the timeout limit", x)
 				case tgs := <-provUpdates:
 					discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
 					for _, got := range discoveryManager.allGroups() {
@@ -768,12 +769,10 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 			}
 		}
 	}
-	if match != present {
-		msg := ""
-		if !present {
-			msg = "not"
-		}
-		require.FailNow(t, "%q should %s be present in Targets labels: %q", label, msg, mergedTargets)
+	if present {
+		require.Truef(t, match, "%q must be present in Targets labels: %q", label, mergedTargets)
+	} else {
+		require.Falsef(t, match, "%q must be absent in Targets labels: %q", label, mergedTargets)
 	}
 }
 
@@ -791,7 +790,7 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -828,7 +827,7 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -868,7 +867,7 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -911,7 +910,7 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -939,11 +938,13 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 
 	// Original targets should be present as soon as possible.
+	// An empty list should be sent for prometheus2 to drop any stale targets
 	syncedTargets = <-discoveryManager.SyncCh()
 	mu.Unlock()
-	require.Len(t, syncedTargets, 1)
+	require.Len(t, syncedTargets, 2)
 	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
 	require.Len(t, syncedTargets["prometheus"], 1)
+	require.Empty(t, syncedTargets["prometheus2"])
 
 	// prometheus2 configs should be ready on second sync.
 	syncedTargets = <-discoveryManager.SyncCh()
@@ -977,7 +978,7 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1021,7 +1022,7 @@ func TestDiscovererConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1049,8 +1050,8 @@ func TestDiscovererConfigs(t *testing.T) {
 }
 
 // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
-// removing all targets from the static_configs sends an update with empty targetGroups.
-// This is required to signal the receiver that this target set has no current targets.
+// removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update.
+// The update is required to signal the consumers that the previous targets should be dropped.
 func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -1058,7 +1059,7 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1083,16 +1084,14 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 
 	syncedTargets = <-discoveryManager.SyncCh()
+	require.Len(t, discoveryManager.targets, 1)
 	p = pk("static", "prometheus", 1)
 	targetGroups, ok := discoveryManager.targets[p]
-	require.True(t, ok, "'%v' should be present in target groups", p)
-	group, ok := targetGroups[""]
-	require.True(t, ok, "missing '' key in target groups %v", targetGroups)
-
-	require.Empty(t, group.Targets, "Invalid number of targets.")
-	require.Len(t, syncedTargets, 1)
-	require.Len(t, syncedTargets["prometheus"], 1)
-	require.Nil(t, syncedTargets["prometheus"][0].Labels)
+	require.True(t, ok, "'%v' should be present in targets", p)
+	// Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
+	require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p)
+	require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
+	require.Empty(t, syncedTargets["prometheus"])
 }
 
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@@ -1141,7 +1140,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1202,16 +1201,16 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
 	c := map[string]Configs{
 		"prometheus": {
-			errorConfig{fmt.Errorf("tests error 0")},
-			errorConfig{fmt.Errorf("tests error 1")},
-			errorConfig{fmt.Errorf("tests error 2")},
+			errorConfig{errors.New("tests error 0")},
+			errorConfig{errors.New("tests error 1")},
+			errorConfig{errors.New("tests error 2")},
 		},
 	}
 	discoveryManager.ApplyConfig(c)
@@ -1275,6 +1274,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
 						Targets: []model.LabelSet{{"__instance__": "1"}},
 					},
 				},
+				"mock1": {},
 			},
 		},
 		{
@@ -1371,10 +1371,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
 			time.Sleep(expected.delay)
 			select {
 			case <-ctx.Done():
-				require.FailNow(t, "step %d: no update received in the expected timeframe", i)
+				t.Fatalf("step %d: no update received in the expected timeframe", i)
 			case tgs, ok := <-mgr.SyncCh():
 				require.True(t, ok, "step %d: discovery manager channel is closed", i)
-				require.Equal(t, len(expected.tgs), len(tgs), "step %d: targets mismatch", i)
+				require.Len(t, tgs, len(expected.tgs), "step %d: targets mismatch", i)
 				for k := range expected.tgs {
 					_, ok := tgs[k]
@@ -1453,7 +1453,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1550,7 +1550,7 @@ func TestUnregisterMetrics(t *testing.T) {
 
 		refreshMetrics, sdMetrics := NewTestMetrics(t, reg)
 
-		discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+		discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 		// discoveryManager will be nil if there was an error configuring metrics.
 		require.NotNil(t, discoveryManager)
 		// Unregister all metrics.
@@ -1562,3 +1562,53 @@ func TestUnregisterMetrics(t *testing.T) {
 		cancel()
 	}
 }
+
+// Calling ApplyConfig() that removes providers at the same time as shutting down
+// the manager should not hang.
+func TestConfigReloadAndShutdownRace(t *testing.T) {
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	mgrCtx, mgrCancel := context.WithCancel(context.Background())
+	discoveryManager := NewManager(mgrCtx, promslog.NewNopLogger(), reg, sdMetrics)
+	require.NotNil(t, discoveryManager)
+	discoveryManager.updatert = 100 * time.Millisecond
+
+	var wgDiscovery sync.WaitGroup
+	wgDiscovery.Add(1)
+	go func() {
+		discoveryManager.Run()
+		wgDiscovery.Done()
+	}()
+	time.Sleep(time.Millisecond * 200)
+
+	var wgBg sync.WaitGroup
+	updateChan := discoveryManager.SyncCh()
+	wgBg.Add(1)
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		defer wgBg.Done()
+		select {
+		case <-ctx.Done():
+			return
+		case <-updateChan:
+		}
+	}()
+
+	c := map[string]Configs{
+		"prometheus": {staticConfig("bar:9090")},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	delete(c, "prometheus")
+	wgBg.Add(1)
+	go func() {
+		discoveryManager.ApplyConfig(c)
+		wgBg.Done()
+	}()
+	mgrCancel()
+	wgDiscovery.Wait()
+
+	cancel()
+	wgBg.Wait()
+}
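Editor's note: the fmt.Errorf to errors.New swaps above (and in the SD constructors below) follow the standard rule that a constant message needs no format machinery; a linter such as perfsprint typically enforces this, though the diff does not name one. A trivial illustration:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// errors.New for constant messages; no format verbs involved.
	errA := errors.New("invalid discovery metrics type")

	// fmt.Errorf remains the right tool for formatting and wrapping.
	errB := fmt.Errorf("while refreshing targets: %w", errA)
	fmt.Println(errB)
}
```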
diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go
index 38b47accff..0c2c2e9702 100644
--- a/discovery/marathon/marathon.go
+++ b/discovery/marathon/marathon.go
@@ -19,6 +19,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"math/rand"
 	"net"
 	"net/http"
@@ -27,7 +28,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -80,7 +80,7 @@ type SDConfig struct {
 }
 
 // NewDiscovererMetrics implements discovery.Config.
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
 	return &marathonMetrics{
 		refreshMetrics: rmi,
 	}
@@ -140,10 +140,10 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Marathon Discovery.
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*marathonMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go
index 659899f163..18ec7bdf19 100644
--- a/discovery/marathon/marathon_test.go
+++ b/discovery/marathon/marathon_test.go
@@ -202,7 +202,7 @@ func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
 		"Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
 }
 
@@ -243,7 +243,7 @@ func TestMarathonZeroTaskPorts(t *testing.T) {
 
 func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) {
 	// Simulate 500 error with a valid JSON response.
-	respHandler := func(w http.ResponseWriter, r *http.Request) {
+	respHandler := func(w http.ResponseWriter, _ *http.Request) {
 		w.WriteHeader(http.StatusInternalServerError)
 		w.Header().Set("Content-Type", "application/json")
 		io.WriteString(w, `{}`)
@@ -300,9 +300,9 @@ func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
 		"Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
 		"Wrong portDefinitions label from the first port.")
 
 	tgt = tg.Targets[1]
@@ -354,12 +354,12 @@ func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
port.") } @@ -401,13 +401,13 @@ func TestMarathonSDSendGroupWithPorts(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList { @@ -458,12 +458,12 @@ func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList { @@ -514,12 +514,12 @@ func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + require.Empty(t, 
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
 
 func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList {
@@ -574,10 +574,10 @@ func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 
 	tgt = tg.Targets[1]
 	require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
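Editor's note: the require.Equal(t, "", ...) to require.Empty(t, ...) rewrites above are behavior-preserving; require.Empty asserts the zero value of its argument. A sketch of the idiom in a hypothetical _test.go file (the package and test names are illustrative):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEmptyIdiom(t *testing.T) {
	var label string
	require.Empty(t, label, "label should be unset") // Preferred form.
	require.Equal(t, "", label)                      // Equivalent, but the style these changes move away from.
}
```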
diff --git a/discovery/metrics_refresh.go b/discovery/metrics_refresh.go
index d621165ced..ef49e591a3 100644
--- a/discovery/metrics_refresh.go
+++ b/discovery/metrics_refresh.go
@@ -17,7 +17,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-// Metric vectors for the "refresh" package.
+// RefreshMetricsVecs are metric vectors for the "refresh" package.
 // We define them here in the "discovery" package in order to avoid a cyclic dependency between
 // "discovery" and "refresh".
 type RefreshMetricsVecs struct {
diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go
index 11445092ee..2b640dea82 100644
--- a/discovery/moby/docker.go
+++ b/discovery/moby/docker.go
@@ -15,22 +15,24 @@ package moby
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
+	"sort"
 	"strconv"
 	"time"
 
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/version"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
@@ -109,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
 		return err
 	}
 	if c.Host == "" {
-		return fmt.Errorf("host missing")
+		return errors.New("host missing")
 	}
 	if _, err = url.Parse(c.Host); err != nil {
 		return err
@@ -127,10 +129,10 @@ type DockerDiscovery struct {
 }
 
 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
-func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
+func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
 	m, ok := metrics.(*dockerMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 
 	d := &DockerDiscovery{
@@ -171,7 +173,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discove
 		}),
 		client.WithScheme(hostURL.Scheme),
 		client.WithHTTPHeaders(map[string]string{
-			"User-Agent": userAgent,
+			"User-Agent": version.PrometheusUserAgent(),
 		}),
 	)
 }
@@ -208,7 +210,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		return nil, fmt.Errorf("error while computing network labels: %w", err)
 	}
 
-	allContainers := make(map[string]types.Container)
+	allContainers := make(map[string]container.Summary)
 	for _, c := range containers {
 		allContainers[c.ID] = c
 	}
@@ -233,46 +235,40 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
 		if len(networks) == 0 {
 			// Try to lookup shared networks
-			for {
-				if containerNetworkMode.IsContainer() {
-					tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
-					if !exists {
-						break
-					}
-					networks = tmpContainer.NetworkSettings.Networks
-					containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
-					if len(networks) > 0 {
-						break
-					}
-				} else {
+			for containerNetworkMode.IsContainer() {
+				tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
+				if !exists {
+					break
+				}
+				networks = tmpContainer.NetworkSettings.Networks
+				containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
+				if len(networks) > 0 {
 					break
 				}
 			}
 		}
 
 		if d.matchFirstNetwork && len(networks) > 1 {
-			// Match user defined network
-			if containerNetworkMode.IsUserDefined() {
-				networkMode := string(containerNetworkMode)
-				networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
-			} else {
-				// Get first network if container network mode has "none" value.
- // This case appears under certain condition: - // 1. Container created with network set to "--net=none". - // 2. Disconnect network "none". - // 3. Reconnect network with user defined networks. - var first string - for k, n := range networks { - if n != nil { - first = k - break - } + // Sort networks by name and take first non-nil network. + keys := make([]string, 0, len(networks)) + for k, n := range networks { + if n != nil { + keys = append(keys, k) } - networks = map[string]*network.EndpointSettings{first: networks[first]} + } + if len(keys) > 0 { + sort.Strings(keys) + firstNetworkMode := keys[0] + firstNetwork := networks[firstNetworkMode] + networks = map[string]*network.EndpointSettings{firstNetworkMode: firstNetwork} } } for _, n := range networks { + if n == nil { + continue + } + var added bool for _, p := range c.Ports { diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index c108ddf582..00e6a3e4f3 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -19,9 +19,9 @@ import ( "sort" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -60,9 +60,9 @@ host: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 6) + require.Len(t, tg.Targets, 8) - for i, lbls := range []model.LabelSet{ + expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", @@ -163,7 +163,43 @@ host: %s "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9104", }, - } { + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + 
"__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + } + sortFunc(expected) + sortFunc(tg.Targets) + + for i, lbls := range expected { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) @@ -190,7 +226,7 @@ host: %s require.NoError(t, metrics.Register()) defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -202,13 +238,8 @@ host: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 9) + require.Len(t, tg.Targets, 13) - sortFunc := func(labelSets []model.LabelSet) { - sort.Slice(labelSets, func(i, j int) bool { - return labelSets[i]["__address__"] < labelSets[j]["__address__"] - }) - } expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", @@ -359,6 +390,70 @@ host: %s "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9104", }, + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + 
"__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.21.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, } sortFunc(expected) @@ -370,3 +465,9 @@ host: %s }) } } + +func sortFunc(labelSets []model.LabelSet) { + sort.Slice(labelSets, func(i, j int) bool { + return labelSets[i]["__address__"] < labelSets[j]["__address__"] + }) +} diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index b0147467d2..57c0af7171 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -15,14 +15,15 @@ package moby import ( "context" + "errors" "fmt" + "log/slog" "net/http" "net/url" "time" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -37,8 +38,6 @@ const ( swarmLabel = model.MetaLabelPrefix + "dockerswarm_" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - // DefaultDockerSwarmSDConfig is the default Docker Swarm SD configuration. var DefaultDockerSwarmSDConfig = DockerSwarmSDConfig{ RefreshInterval: model.Duration(60 * time.Second), @@ -71,7 +70,7 @@ type Filter struct { } // NewDiscovererMetrics implements discovery.Config. -func (*DockerSwarmSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*DockerSwarmSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &dockerswarmMetrics{ refreshMetrics: rmi, } @@ -99,7 +98,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e return err } if c.Host == "" { - return fmt.Errorf("host missing") + return errors.New("host missing") } if _, err = url.Parse(c.Host); err != nil { return err @@ -107,7 +106,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e switch c.Role { case "services", "nodes", "tasks": case "": - return fmt.Errorf("role missing (one of: tasks, services, nodes)") + return errors.New("role missing (one of: tasks, services, nodes)") default: return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role) } @@ -125,10 +124,10 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dockerswarmMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ @@ -168,7 +167,7 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discover }), client.WithScheme(hostURL.Scheme), client.WithHTTPHeaders(map[string]string{ - "User-Agent": userAgent, + "User-Agent": version.PrometheusUserAgent(), }), ) } diff --git a/discovery/moby/mock_test.go b/discovery/moby/mock_test.go index 3f35258c8f..7ef5cb07c3 100644 --- a/discovery/moby/mock_test.go +++ b/discovery/moby/mock_test.go @@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() { if len(query) == 2 { h := sha1.New() h.Write([]byte(query[1])) - // Avoing long filenames for Windows. + // Avoiding long filenames for Windows. f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10] } } diff --git a/discovery/moby/network.go b/discovery/moby/network.go index 794d2e607d..ea1ca66bc7 100644 --- a/discovery/moby/network.go +++ b/discovery/moby/network.go @@ -17,7 +17,7 @@ import ( "context" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/prometheus/prometheus/util/strutil" @@ -34,7 +34,7 @@ const ( ) func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) { - networks, err := client.NetworkList(ctx, types.NetworkListOptions{}) + networks, err := client.NetworkList(ctx, network.ListOptions{}) if err != nil { return nil, err } diff --git a/discovery/moby/nodes.go b/discovery/moby/nodes.go index b5be844eda..a11afeee25 100644 --- a/discovery/moby/nodes.go +++ b/discovery/moby/nodes.go @@ -19,7 +19,7 @@ import ( "net" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -48,7 +48,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err Source: "DockerSwarm", } - nodes, err := d.client.NodeList(ctx, types.NodeListOptions{Filters: d.filters}) + nodes, err := d.client.NodeList(ctx, swarm.NodeListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm nodes: %w", err) } @@ -85,7 +85,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err } func (d *Discovery) getNodesLabels(ctx context.Context) (map[string]map[string]string, error) { - nodes, err := d.client.NodeList(ctx, types.NodeListOptions{}) + nodes, err := d.client.NodeList(ctx, swarm.NodeListOptions{}) if err != nil { return nil, fmt.Errorf("error while listing swarm nodes: %w", err) } diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go index 4ad1088d1a..973b83c4b6 100644 --- a/discovery/moby/nodes_test.go +++ b/discovery/moby/nodes_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := 
NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/services.go b/discovery/moby/services.go index c61b499259..0698c01e6a 100644 --- a/discovery/moby/services.go +++ b/discovery/moby/services.go @@ -19,7 +19,6 @@ import ( "net" "strconv" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/prometheus/common/model" @@ -46,7 +45,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, Source: "DockerSwarm", } - services, err := d.client.ServiceList(ctx, types.ServiceListOptions{Filters: d.filters}) + services, err := d.client.ServiceList(ctx, swarm.ServiceListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } @@ -127,7 +126,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, } func (d *Discovery) getServicesLabelsAndPorts(ctx context.Context) (map[string]map[string]string, map[string][]swarm.PortConfig, error) { - services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) + services, err := d.client.ServiceList(ctx, swarm.ServiceListOptions{}) if err != nil { return nil, nil, err } diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go index 47ca69e33a..7a966cfeee 100644 --- a/discovery/moby/services_test.go +++ b/discovery/moby/services_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -349,7 +349,7 @@ filters: defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/tasks.go b/discovery/moby/tasks.go index 38b9d33de2..46e8a06d01 100644 --- a/discovery/moby/tasks.go +++ b/discovery/moby/tasks.go @@ -19,7 +19,6 @@ import ( "net" "strconv" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/prometheus/common/model" @@ -43,7 +42,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err Source: "DockerSwarm", } - tasks, err := d.client.TaskList(ctx, types.TaskListOptions{Filters: d.filters}) + tasks, err := d.client.TaskList(ctx, swarm.TaskListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go index ef71bc02f5..59d8831c3b 100644 --- a/discovery/moby/tasks_test.go +++ b/discovery/moby/tasks_test.go @@ -18,9 +18,9 @@ import ( "fmt" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -48,7 +48,7 @@ host: %s defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, 
log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/moby/testdata/dockerprom/containers/json.json b/discovery/moby/testdata/dockerprom/containers/json.json index ebfc56b6d5..33406bf9a4 100644 --- a/discovery/moby/testdata/dockerprom/containers/json.json +++ b/discovery/moby/testdata/dockerprom/containers/json.json @@ -228,5 +228,74 @@ "Networks": {} }, "Mounts": [] + }, + { + "Id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "Names": [ + "/dockersd_multi_networks" + ], + "Image": "mysql:5.7.29", + "ImageID": "sha256:16ae2f4625ba63a250462bedeece422e741de9f0caf3b1d89fd5b257aca80cd1", + "Command": "mysqld", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 3306, + "Type": "tcp" + }, + { + "PrivatePort": 33060, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysql", + "com.docker.compose.version": "2.2.2" + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "dockersd_private_none" + }, + "NetworkSettings": { + "Networks": { + "dockersd_private": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "EndpointID": "972d6807997369605ace863af58de6cb90c787a5bf2ffc4105662d393ae539b7", + "Gateway": "172.20.0.1", + "IPAddress": "172.20.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:14:00:02", + "DriverOpts": null + }, + "dockersd_private1": { + "IPAMConfig": {}, + "Links": null, + "Aliases": [ + "mysql", + "mysql", + "f9ade4b83199" + ], + "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "EndpointID": "91a98405344ee1cb7d977cafabe634837876651544b32da20a5e0155868e6f5f", + "Gateway": "172.21.0.1", + "IPAddress": "172.21.0.3", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:15:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] } ] diff --git a/discovery/moby/testdata/swarmprom/services.json b/discovery/moby/testdata/swarmprom/services.json index 72caa7a7f8..8f6c0793dd 100644 --- a/discovery/moby/testdata/swarmprom/services.json +++ b/discovery/moby/testdata/swarmprom/services.json @@ -224,7 +224,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/moby/testdata/swarmprom/tasks.json b/discovery/moby/testdata/swarmprom/tasks.json index 33d81f25ce..af5ff9fe28 100644 --- a/discovery/moby/testdata/swarmprom/tasks.json +++ b/discovery/moby/testdata/swarmprom/tasks.json @@ -973,7 +973,7 @@ "Args": [ "--config.file=/etc/prometheus/prometheus.yml", "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention=24h" + "--storage.tsdb.retention.time=24h" ], "Privileges": { "CredentialSpec": null, diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index d9c48120ae..7516308026 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" nomad "github.com/hashicorp/nomad/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" 
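The nomad changes repeat the migration pattern applied throughout this diff: the go-kit log.Logger parameter becomes the standard library's *slog.Logger (tests switch to promslog.NewNopLogger()), and fmt.Errorf calls without format verbs become errors.New. A minimal sketch of the resulting constructor shape, using hypothetical names rather than the real nomad signature:

package sketch

import (
	"errors"
	"log/slog"
)

// Discovery stands in for the per-provider discoverer types migrated in this diff.
type Discovery struct {
	logger *slog.Logger
}

// NewDiscovery shows the migrated constructor shape: a *slog.Logger parameter and
// errors.New for constant messages, with fmt.Errorf kept only where %w wraps an error.
func NewDiscovery(logger *slog.Logger) (*Discovery, error) {
	if logger == nil {
		return nil, errors.New("logger missing") // hypothetical check, for illustration only
	}
	// slog takes structured key/value pairs directly, replacing the
	// level.Warn(logger).Log("msg", ...) calls from go-kit/log.
	logger.Warn("example message", "key", "value")
	return &Discovery{logger: logger}, nil
}

The real constructors instead default a nil logger to promslog.NewNopLogger(), as newInstanceDiscovery does below.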
@@ -121,10 +121,10 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*nomadMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index 357d4a8e9b..a73b45785d 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -21,9 +21,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -76,7 +76,7 @@ func (s *NomadSDTestSuite) SetupTest(t *testing.T) { } func (m *SDMock) HandleServicesList() { - m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) @@ -99,7 +99,7 @@ func (m *SDMock) HandleServicesList() { } func (m *SDMock) HandleServiceHashiCupsGet() { - m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) @@ -127,19 +127,37 @@ func (m *SDMock) HandleServiceHashiCupsGet() { } func TestConfiguredService(t *testing.T) { - conf := &SDConfig{ - Server: "http://localhost:4646", + testCases := []struct { + name string + server string + acceptedURL bool + }{ + {"invalid hostname URL", "http://foo.bar:4646", true}, + {"invalid even though accepted by parsing", "foo.bar:4646", true}, + {"valid address URL", "http://172.30.29.23:4646", true}, + {"invalid URL", "172.30.29.23:4646", false}, } - reg := prometheus.NewRegistry() - refreshMetrics := discovery.NewRefreshMetrics(reg) - metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) - require.NoError(t, metrics.Register()) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + conf := &SDConfig{ + Server: tc.server, + } - _, err := NewDiscovery(conf, nil, metrics) - require.NoError(t, err) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() - metrics.Unregister() + _, err := NewDiscovery(conf, nil, metrics) + if tc.acceptedURL { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } } func TestNomadSDRefresh(t *testing.T) { @@ -160,7 +178,7 @@ func TestNomadSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) tgs, err := d.refresh(context.Background()) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 8964da9294..e7a6362052 100644 --- a/discovery/openstack/hypervisor.go +++ 
b/discovery/openstack/hypervisor.go @@ -16,14 +16,14 @@ package openstack import ( "context" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" - "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors" + "github.com/gophercloud/gophercloud/v2/pagination" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -43,14 +43,14 @@ type HypervisorDiscovery struct { provider *gophercloud.ProviderClient authOpts *gophercloud.AuthOptions region string - logger log.Logger + logger *slog.Logger port int availability gophercloud.Availability } // newHypervisorDiscovery returns a new hypervisor discovery. func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, availability gophercloud.Availability, l log.Logger, + port int, region string, availability gophercloud.Availability, l *slog.Logger, ) *HypervisorDiscovery { return &HypervisorDiscovery{ provider: provider, authOpts: opts, @@ -59,8 +59,7 @@ func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercl } func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - h.provider.Context = ctx - err := openstack.Authenticate(h.provider, *h.authOpts) + err := openstack.Authenticate(ctx, h.provider, *h.authOpts) if err != nil { return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err) } @@ -78,7 +77,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group // OpenStack API reference // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details pagerHypervisors := hypervisors.List(client, nil) - err = pagerHypervisors.EachPage(func(page pagination.Page) (bool, error) { + err = pagerHypervisors.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { hypervisorList, err := hypervisors.ExtractHypervisors(page) if err != nil { return false, fmt.Errorf("could not extract hypervisors: %w", err) diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 45684b4a2e..e4a97f32cf 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -93,6 +93,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 78c669e6f7..6c2f79b3a4 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -16,17 +16,18 @@ package openstack import ( "context" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - 
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/v2/pagination" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" @@ -52,7 +53,7 @@ type InstanceDiscovery struct { provider *gophercloud.ProviderClient authOpts *gophercloud.AuthOptions region string - logger log.Logger + logger *slog.Logger port int allTenants bool availability gophercloud.Availability @@ -60,10 +61,10 @@ type InstanceDiscovery struct { // NewInstanceDiscovery returns a new instance discovery. func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, - port int, region string, allTenants bool, availability gophercloud.Availability, l log.Logger, + port int, region string, allTenants bool, availability gophercloud.Availability, l *slog.Logger, ) *InstanceDiscovery { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } return &InstanceDiscovery{ provider: provider, authOpts: opts, @@ -72,13 +73,12 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou } type floatingIPKey struct { - id string - fixed string + deviceID string + fixed string } func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - i.provider.Context = ctx - err := openstack.Authenticate(i.provider, *i.authOpts) + err := openstack.Authenticate(ctx, i.provider, *i.authOpts) if err != nil { return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err) } @@ -90,23 +90,60 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, return nil, fmt.Errorf("could not create OpenStack compute session: %w", err) } + networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{ + Region: i.region, Availability: i.availability, + }) + if err != nil { + return nil, fmt.Errorf("could not create OpenStack network session: %w", err) + } + // OpenStack API reference - // https://developer.openstack.org/api-ref/compute/#list-floating-ips - pagerFIP := floatingips.List(client) + // https://docs.openstack.org/api-ref/network/v2/index.html#list-ports + portPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list all ports: %w", err) + } + + allPorts, err := ports.ExtractPorts(portPages) + if err != nil { + return nil, fmt.Errorf("failed to extract Ports: %w", err) + } + + portList := make(map[string]string) + for _, port := range allPorts { + portList[port.ID] = port.DeviceID + } + + // OpenStack API reference + // https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ips + pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{}) floatingIPList := make(map[floatingIPKey]string) floatingIPPresent := make(map[string]struct{}) - err = pagerFIP.EachPage(func(page pagination.Page) (bool, error) { + err = pagerFIP.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { result, err := floatingips.ExtractFloatingIPs(page) if err != nil { 
return false, fmt.Errorf("could not extract floatingips: %w", err) } for _, ip := range result { // Skip not associated ips - if ip.InstanceID == "" || ip.FixedIP == "" { + if ip.PortID == "" || ip.FixedIP == "" { continue } - floatingIPList[floatingIPKey{id: ip.InstanceID, fixed: ip.FixedIP}] = ip.IP - floatingIPPresent[ip.IP] = struct{}{} + + // Fetch deviceID from portList + deviceID, ok := portList[ip.PortID] + if !ok { + i.logger.Warn("Floating IP PortID not found in portList", "PortID", ip.PortID) + continue + } + + key := floatingIPKey{ + deviceID: deviceID, + fixed: ip.FixedIP, + } + + floatingIPList[key] = ip.FloatingIP + floatingIPPresent[ip.FloatingIP] = struct{}{} } return true, nil }) @@ -123,7 +160,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, tg := &targetgroup.Group{ Source: "OS_" + i.region, } - err = pager.EachPage(func(page pagination.Page) (bool, error) { + err = pager.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { if ctx.Err() != nil { return false, fmt.Errorf("could not extract instances: %w", ctx.Err()) } @@ -134,7 +171,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, for _, s := range instanceList { if len(s.Addresses) == 0 { - level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID) + i.logger.Info("Got no IP address", "instance", s.ID) continue } @@ -151,7 +188,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, if !nameOk { flavorID, idOk := s.Flavor["id"].(string) if !idOk { - level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string") + i.logger.Warn("Invalid type for both flavor original_name and flavor id, expected string") continue } labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) @@ -171,22 +208,22 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, for pool, address := range s.Addresses { md, ok := address.([]interface{}) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected array") + i.logger.Warn("Invalid type for address, expected array") continue } if len(md) == 0 { - level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID) + i.logger.Debug("Got no IP address", "instance", s.ID) continue } for _, address := range md { md1, ok := address.(map[string]interface{}) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict") + i.logger.Warn("Invalid type for address, expected dict") continue } addr, ok := md1["addr"].(string) if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for address, expected string") + i.logger.Warn("Invalid type for address, expected string") continue } if _, ok := floatingIPPresent[addr]; ok { @@ -198,7 +235,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, } lbls[openstackLabelAddressPool] = model.LabelValue(pool) lbls[openstackLabelPrivateIP] = model.LabelValue(addr) - if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok { + if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok { lbls[openstackLabelPublicIP] = model.LabelValue(val) } addr = net.JoinHostPort(addr, strconv.Itoa(i.port)) diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 2b5ac1b89e..0933b57067 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -32,6 +32,7 @@ func (s 
*OpenstackSDInstanceTestSuite) SetupTest(t *testing.T) { s.Mock.HandleServerListSuccessfully() s.Mock.HandleFloatingIPListSuccessfully() + s.Mock.HandlePortsListSuccessfully() s.Mock.HandleVersionsSuccessfully() s.Mock.HandleAuthSuccessfully() @@ -66,7 +67,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 4) + require.Len(t, tg.Targets, 6) for i, lbls := range []model.LabelSet{ { @@ -119,6 +120,31 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, + { + "__address__": model.LabelValue("10.0.0.33:0"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), + "__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"), + "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), + "__meta_openstack_instance_name": model.LabelValue("merp-project2"), + "__meta_openstack_private_ip": model.LabelValue("10.0.0.33"), + "__meta_openstack_address_pool": model.LabelValue("private"), + "__meta_openstack_tag_env": model.LabelValue("prod"), + "__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"), + "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), + }, + { + "__address__": model.LabelValue("10.0.0.34:0"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), + "__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"), + "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), + "__meta_openstack_instance_name": model.LabelValue("merp-project2"), + "__meta_openstack_private_ip": model.LabelValue("10.0.0.34"), + "__meta_openstack_address_pool": model.LabelValue("private"), + "__meta_openstack_tag_env": model.LabelValue("prod"), + "__meta_openstack_public_ip": model.LabelValue("10.10.10.24"), + "__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"), + "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), + }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) @@ -134,6 +160,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := hypervisor.refresh(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) } diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go new file mode 100644 index 0000000000..254b713cdd --- /dev/null +++ b/discovery/openstack/loadbalancer.go @@ -0,0 +1,193 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openstack + +import ( + "context" + "fmt" + "log/slog" + "net" + "strconv" + "strings" + + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack" + "github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners" + "github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + openstackLabelLoadBalancerID = openstackLabelPrefix + "loadbalancer_id" + openstackLabelLoadBalancerName = openstackLabelPrefix + "loadbalancer_name" + openstackLabelLoadBalancerOperatingStatus = openstackLabelPrefix + "loadbalancer_operating_status" + openstackLabelLoadBalancerProvisioningStatus = openstackLabelPrefix + "loadbalancer_provisioning_status" + openstackLabelLoadBalancerAvailabilityZone = openstackLabelPrefix + "loadbalancer_availability_zone" + openstackLabelLoadBalancerFloatingIP = openstackLabelPrefix + "loadbalancer_floating_ip" + openstackLabelLoadBalancerVIP = openstackLabelPrefix + "loadbalancer_vip" + openstackLabelLoadBalancerProvider = openstackLabelPrefix + "loadbalancer_provider" + openstackLabelLoadBalancerTags = openstackLabelPrefix + "loadbalancer_tags" +) + +// LoadBalancerDiscovery discovers OpenStack load balancers. +type LoadBalancerDiscovery struct { + provider *gophercloud.ProviderClient + authOpts *gophercloud.AuthOptions + region string + logger *slog.Logger + availability gophercloud.Availability +} + +// NewLoadBalancerDiscovery returns a new loadbalancer discovery. +func newLoadBalancerDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, + region string, availability gophercloud.Availability, l *slog.Logger, +) *LoadBalancerDiscovery { + if l == nil { + l = promslog.NewNopLogger() + } + return &LoadBalancerDiscovery{ + provider: provider, authOpts: opts, + region: region, availability: availability, logger: l, + } +} + +func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + err := openstack.Authenticate(ctx, i.provider, *i.authOpts) + if err != nil { + return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err) + } + + client, err := openstack.NewLoadBalancerV2(i.provider, gophercloud.EndpointOpts{ + Region: i.region, Availability: i.availability, + }) + if err != nil { + return nil, fmt.Errorf("could not create OpenStack load balancer session: %w", err) + } + + networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{ + Region: i.region, Availability: i.availability, + }) + if err != nil { + return nil, fmt.Errorf("could not create OpenStack network session: %w", err) + } + + allPages, err := loadbalancers.List(client, loadbalancers.ListOpts{}).AllPages(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list load balancers: %w", err) + } + + allLBs, err := loadbalancers.ExtractLoadBalancers(allPages) + if err != nil { + return nil, fmt.Errorf("failed to extract load balancers: %w", err) + } + + // Fetch all listeners in one API call + listenerPages, err := listeners.List(client, listeners.ListOpts{}).AllPages(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list all listeners: %w", err) + } + + allListeners, err := listeners.ExtractListeners(listenerPages) + if err != nil { + return nil, fmt.Errorf("failed to extract all listeners: %w", 
err) + } + + // Create a map to group listeners by Load Balancer ID + listenerMap := make(map[string][]listeners.Listener) + for _, listener := range allListeners { + // Iterate through each associated Load Balancer ID in the Loadbalancers array + for _, lb := range listener.Loadbalancers { + listenerMap[lb.ID] = append(listenerMap[lb.ID], listener) + } + } + + // Fetch all floating IPs + fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list floating IPs: %w", err) + } + + allFIPs, err := floatingips.ExtractFloatingIPs(fipPages) + if err != nil { + return nil, fmt.Errorf("failed to extract floating IPs: %w", err) + } + + // Create a map to associate floating IPs with their resource IDs + fipMap := make(map[string]string) // Key: VIP port ID, Value: floating IP address + for _, fip := range allFIPs { + if fip.PortID != "" { + fipMap[fip.PortID] = fip.FloatingIP + } + } + + tg := &targetgroup.Group{ + Source: "OS_" + i.region, + } + + for _, lb := range allLBs { + // Retrieve listeners for this load balancer from the map + lbListeners, exists := listenerMap[lb.ID] + if !exists || len(lbListeners) == 0 { + i.logger.Debug("Got no listener", "loadbalancer", lb.ID) + continue + } + + // Variable to store the port of the first PROMETHEUS listener + var listenerPort int + hasPrometheusListener := false + + // Check if any listener has the PROMETHEUS protocol + for _, listener := range lbListeners { + if listener.Protocol == "PROMETHEUS" { + hasPrometheusListener = true + listenerPort = listener.ProtocolPort + break + } + } + + // Skip load balancers without a PROMETHEUS listener + if !hasPrometheusListener { + i.logger.Debug("Got no PROMETHEUS listener", "loadbalancer", lb.ID) + continue + } + + labels := model.LabelSet{} + addr := net.JoinHostPort(lb.VipAddress, strconv.Itoa(listenerPort)) + labels[model.AddressLabel] = model.LabelValue(addr) + labels[openstackLabelLoadBalancerID] = model.LabelValue(lb.ID) + labels[openstackLabelLoadBalancerName] = model.LabelValue(lb.Name) + labels[openstackLabelLoadBalancerOperatingStatus] = model.LabelValue(lb.OperatingStatus) + labels[openstackLabelLoadBalancerProvisioningStatus] = model.LabelValue(lb.ProvisioningStatus) + labels[openstackLabelLoadBalancerAvailabilityZone] = model.LabelValue(lb.AvailabilityZone) + labels[openstackLabelLoadBalancerVIP] = model.LabelValue(lb.VipAddress) + labels[openstackLabelLoadBalancerProvider] = model.LabelValue(lb.Provider) + labels[openstackLabelProjectID] = model.LabelValue(lb.ProjectID) + + if len(lb.Tags) > 0 { + labels[openstackLabelLoadBalancerTags] = model.LabelValue(strings.Join(lb.Tags, ",")) + } + + if floatingIP, exists := fipMap[lb.VipPortID]; exists { + labels[openstackLabelLoadBalancerFloatingIP] = model.LabelValue(floatingIP) + } + + tg.Targets = append(tg.Targets, labels) + } + + return []*targetgroup.Group{tg}, nil +} diff --git a/discovery/openstack/loadbalancer_test.go b/discovery/openstack/loadbalancer_test.go new file mode 100644 index 0000000000..eee21b9831 --- /dev/null +++ b/discovery/openstack/loadbalancer_test.go @@ -0,0 +1,137 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openstack + +import ( + "context" + "fmt" + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" +) + +type OpenstackSDLoadBalancerTestSuite struct { + Mock *SDMock +} + +func (s *OpenstackSDLoadBalancerTestSuite) SetupTest(t *testing.T) { + s.Mock = NewSDMock(t) + s.Mock.Setup() + + s.Mock.HandleLoadBalancerListSuccessfully() + s.Mock.HandleListenersListSuccessfully() + s.Mock.HandleFloatingIPListSuccessfully() + + s.Mock.HandleVersionsSuccessfully() + s.Mock.HandleAuthSuccessfully() +} + +func (s *OpenstackSDLoadBalancerTestSuite) openstackAuthSuccess() (refresher, error) { + conf := SDConfig{ + IdentityEndpoint: s.Mock.Endpoint(), + Password: "test", + Username: "test", + DomainName: "12345", + Region: "RegionOne", + Role: "loadbalancer", + } + return newRefresher(&conf, nil) +} + +func TestOpenstackSDLoadBalancerRefresh(t *testing.T) { + mock := &OpenstackSDLoadBalancerTestSuite{} + mock.SetupTest(t) + + instance, err := mock.openstackAuthSuccess() + require.NoError(t, err) + + ctx := context.Background() + tgs, err := instance.refresh(ctx) + + require.NoError(t, err) + require.Len(t, tgs, 1) + + tg := tgs[0] + require.NotNil(t, tg) + require.NotNil(t, tg.Targets) + require.Len(t, tg.Targets, 4) + + for i, lbls := range []model.LabelSet{ + { + "__address__": model.LabelValue("10.0.0.32:9273"), + "__meta_openstack_loadbalancer_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb1"), + "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.1.2"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.0.32"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_loadbalancer_tags": model.LabelValue("tag1,tag2"), + "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), + }, + { + "__address__": model.LabelValue("10.0.2.78:8080"), + "__meta_openstack_loadbalancer_id": model.LabelValue("d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb3"), + "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az3"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.3.4"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.2.78"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_loadbalancer_tags": model.LabelValue("tag5,tag6"), + "__meta_openstack_project_id": model.LabelValue("ac57f03dba1a4fdebff3e67201bc7a85"), + }, + { + "__address__": model.LabelValue("10.0.3.99:9090"), + "__meta_openstack_loadbalancer_id": 
model.LabelValue("f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb4"), + "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.4.5"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.3.99"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_project_id": model.LabelValue("fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"), + }, + { + "__address__": model.LabelValue("10.0.4.88:9876"), + "__meta_openstack_loadbalancer_id": model.LabelValue("e83a6d92-7a3e-4567-94b3-20c83b32a75e"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb5"), + "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az4"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.4.88"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_project_id": model.LabelValue("a5d3b2e1e6f34cd9a5f7c2f01a6b8e29"), + }, + } { + t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { + require.Equal(t, lbls, tg.Targets[i]) + }) + } +} + +func TestOpenstackSDLoadBalancerRefreshWithDoneContext(t *testing.T) { + mock := &OpenstackSDLoadBalancerTestSuite{} + mock.SetupTest(t) + + loadbalancer, _ := mock.openstackAuthSuccess() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + _, err := loadbalancer.refresh(ctx) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) +} diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index 4518f41166..34e09c710f 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -62,7 +62,7 @@ func testHeader(t *testing.T, r *http.Request, header, expected string) { // HandleVersionsSuccessfully mocks version call. func (m *SDMock) HandleVersionsSuccessfully() { - m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintf(w, ` { "versions": { @@ -90,7 +90,7 @@ func (m *SDMock) HandleVersionsSuccessfully() { // HandleAuthSuccessfully mocks auth call. 
func (m *SDMock) HandleAuthSuccessfully() { - m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, _ *http.Request) { w.Header().Add("X-Subject-Token", tokenID) w.WriteHeader(http.StatusCreated) @@ -124,7 +124,7 @@ func (m *SDMock) HandleAuthSuccessfully() { "type": "identity", "name": "keystone" }, - { + { "endpoints": [ { "id": "e2ffee808abc4a60916715b1d4b489dd", @@ -136,8 +136,33 @@ func (m *SDMock) HandleAuthSuccessfully() { ], "id": "b7f2a5b1a019459cb956e43a8cb41e31", "type": "compute" + }, + { + "endpoints": [ + { + "id": "5448e46679564d7d95466c2bef54c296", + "interface": "public", + "region": "RegionOne", + "region_id": "RegionOne", + "url": "%s" + } + ], + "id": "589f3d99a3d94f5f871e9f5cf206d2e8", + "type": "network" + }, + { + "endpoints": [ + { + "id": "39dc322ce86c1234b4f06c2eeae0841b", + "interface": "public", + "region": "RegionOne", + "region_id": "RegionOne", + "url": "%s" + } + ], + "id": "26968f704a68417bbddd29508455ff90", + "type": "load-balancer" } - ], "expires_at": "2013-02-27T18:30:59.999999Z", "is_domain": false, @@ -174,7 +199,7 @@ func (m *SDMock) HandleAuthSuccessfully() { } } } - `, m.Endpoint()) + `, m.Endpoint(), m.Endpoint(), m.Endpoint()) }) } @@ -461,82 +486,159 @@ const serverListBody = ` "metadata": {} }, { - "status": "ACTIVE", - "updated": "2014-09-25T13:04:49Z", - "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", - "OS-EXT-SRV-ATTR:host": "devstack", - "addresses": { - "private": [ - { - "version": 4, - "addr": "10.0.0.33", - "OS-EXT-IPS:type": "fixed" - }, - { - "version": 4, - "addr": "10.0.0.34", - "OS-EXT-IPS:type": "fixed" - }, - { - "version": 4, - "addr": "10.10.10.4", - "OS-EXT-IPS:type": "floating" - } - ] - }, - "links": [ - { - "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "self" + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "version": 4, + "addr": "10.0.0.33", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.0.0.34", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.10.10.4", + "OS-EXT-IPS:type": "floating" + } + ] }, - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "bookmark" - } - ], - "key_name": null, - "image": "", - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", - "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", - "flavor": { - "vcpus": 2, - "ram": 4096, - "disk": 0, - "ephemeral": 0, - "swap": 0, - "original_name": "m1.small", - "extra_specs": { - "aggregate_instance_extra_specs:general": "true", - "hw:mem_page_size": "large", - "hw:vif_multiqueue_enabled": "true" + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": "", + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + 
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.small", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "merp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": { + "env": "prod" } }, - "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", - "security_groups": [ - { - "name": "default" + { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "version": 4, + "addr": "10.0.0.33", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.0.0.34", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.10.10.24", + "OS-EXT-IPS:type": "floating" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": "", + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000002d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.small", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } + }, + "id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "merp-project2", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": { + "env": "prod" } - ], - "OS-SRV-USG:terminated_at": null, - "OS-EXT-AZ:availability_zone": "nova", - "user_id": "9349aff8be7545ac9d2f1d00999a23cd", - "name": "merp", - "created": "2014-09-25T13:04:41Z", - "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", - "OS-DCF:diskConfig": "MANUAL", - "os-extended-volumes:volumes_attached": [], - "accessIPv4": "", - "accessIPv6": "", - "progress": 0, - "OS-EXT-STS:power_state": 1, - "config_drive": "", - "metadata": { - "env": "prod" } - } ] } ` @@ -554,35 +656,139 @@ func (m *SDMock) HandleServerListSuccessfully() { const listOutput = ` { - "floating_ips": [ - { - "fixed_ip": null, - "id": "1", - "instance_id": null, - "ip": 
"10.10.10.1", - "pool": "nova" - }, - { - "fixed_ip": "10.0.0.32", - "id": "2", - "instance_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - "ip": "10.10.10.2", - "pool": "nova" - }, - { - "fixed_ip": "10.0.0.34", - "id": "3", - "instance_id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", - "ip": "10.10.10.4", - "pool": "nova" - } - ] + "floatingips": [ + { + "id": "03a77860-ae03-46c4-b502-caea11467a79", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "10.10.10.1", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "d5597901-48c8-4a69-a041-cfc5be158a04", + "fixed_ip_address": null, + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T16:30:27Z", + "updated_at": "2023-08-30T16:30:28Z" + }, + { + "id": "03e28c79-5a4c-491e-a4fe-3ff6bba830c6", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "10.10.10.2", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "4a45b012-0478-484d-8cf3-c8abdb194d08", + "fixed_ip_address": "10.0.0.32", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-09-06T15:45:36Z", + "updated_at": "2023-09-06T15:45:36Z" + }, + { + "id": "087fcdd2-1d13-4f72-9c0e-c759e796d558", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "10.10.10.4", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "a0e244e8-7910-4427-b8d1-20470cad4f8a", + "fixed_ip_address": "10.0.0.34", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z" + }, + { + "id": "b23df91a-a74a-4f75-b252-750aff4a5a0c", + "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", + "floating_ip_address": "10.10.10.24", + "floating_network_id": "b19ff5bc-a49a-46cc-8d14-ca5f1e94791f", + "router_id": "65a5e5af-17f0-4124-9a81-c08b44f5b8a7", + "port_id": "b926ab68-ec54-46d8-8c50-1c07aafd5ae9", + "fixed_ip_address": "10.0.0.34", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z" + }, + { + "id": "fea7332d-9027-4cf9-bf62-c3c4c6ebaf84", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "192.168.1.2", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e", + "fixed_ip_address": "10.0.0.32", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T15:11:37Z", + "updated_at": "2023-08-30T15:11:38Z", + "revision_number": 1, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "febb9554-cf83-4f9b-94d9-1b3c34be357f", + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "floating_ip_address": "192.168.3.4", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9", + "fixed_ip_address": "10.0.2.78", + 
"status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T15:11:37Z", + "updated_at": "2023-08-30T15:11:38Z", + "revision_number": 1, + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "febb9554-cf83-4f9b-94d9-1b3c34be357f", + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "floating_ip_address": "192.168.4.5", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6", + "fixed_ip_address": "10.0.3.99", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T15:11:37Z", + "updated_at": "2023-08-30T15:11:38Z", + "revision_number": 1, + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + } + ] } ` // HandleFloatingIPListSuccessfully mocks floating ips call. func (m *SDMock) HandleFloatingIPListSuccessfully() { - m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) @@ -590,3 +796,608 @@ func (m *SDMock) HandleFloatingIPListSuccessfully() { fmt.Fprint(w, listOutput) }) } + +const portsListBody = ` +{ + "ports": [ + { + "id": "d5597901-48c8-4a69-a041-cfc5be158a04", + "name": "", + "network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "mac_address": "", + "admin_state_up": true, + "status": "DOWN", + "device_id": "", + "device_owner": "", + "fixed_ips": [], + "allowed_address_pairs": [], + "extra_dhcp_opts": [], + "security_groups": [], + "description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2023-08-30T16:30:27Z", + "updated_at": "2023-08-30T16:30:28Z", + "revision_number": 0, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "4a45b012-0478-484d-8cf3-c8abdb194d08", + "name": "ovn-lb-vip-0980c8de-58c3-481d-89e3-ed81f44286c0", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "mac_address": "fa:16:3e:23:12:a3", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "device_owner": "", + "fixed_ips": [ + { + "subnet_id": "", + "ip_address": "10.10.10.2" + } + ], + "allowed_address_pairs": [], + "extra_dhcp_opts": [], + "security_groups": [], + "description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2023-09-06T15:45:36Z", + "updated_at": "2023-09-06T15:45:36Z", + "revision_number": 0, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "a0e244e8-7910-4427-b8d1-20470cad4f8a", + "name": "ovn-lb-vip-26c0ccb1-3036-4345-99e8-d8f34a8ba6b2", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "mac_address": "fa:16:3e:5f:43:10", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", + "device_owner": "", + "fixed_ips": [ + { + "subnet_id": "", + "ip_address": "10.10.10.4" + } + ], + "allowed_address_pairs": [], + "extra_dhcp_opts": [], + "security_groups": [], + 
"description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z", + "revision_number": 0, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "b926ab68-ec54-46d8-8c50-1c07aafd5ae9", + "name": "dummy-port", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", + "mac_address": "fa:16:3e:5f:12:10", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f", + "device_owner": "", + "fixed_ips": [ + { + "subnet_id": "", + "ip_address": "10.10.10.24" + } + ], + "allowed_address_pairs": [], + "extra_dhcp_opts": [], + "security_groups": [], + "description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z", + "revision_number": 0, + "project_id": "b78fef2305934dbbbeb9a10b4c326f7a" + } + ] +} +` + +// HandlePortsListSuccessfully mocks the ports list API. +func (m *SDMock) HandlePortsListSuccessfully() { + m.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, http.MethodGet) + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, portsListBody) + }) +} + +const lbListBody = ` +{ + "loadbalancers": [ + { + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "name": "lb1", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "fcad67a6189847c4aecfa3c81a05783b", + "created_at": "2024-12-01T10:00:00", + "updated_at": "2024-12-01T10:30:00", + "vip_address": "10.0.0.32", + "vip_port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e", + "vip_subnet_id": "14a4c6a5-fe71-4a94-9071-4cd12fb8337f", + "vip_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "tags": ["tag1", "tag2"], + "availability_zone": "az1", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "c4146b54-febc-4caf-a53f-ed1cab6faba5" + }, + { + "id": "a058d20e-82de-4eff-bb65-5c76a8554435" + } + ], + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54", + "name": "lb3", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "created_at": "2024-12-01T12:00:00", + "updated_at": "2024-12-01T12:45:00", + "vip_address": "10.0.2.78", + "vip_port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9", + "vip_subnet_id": "36c5e9f6-e7a2-4975-a8c6-3b8e4f93cf45", + "vip_network_id": "g03c6f27-e617-4975-c8f7-4c9f3f94cf68", + "tags": ["tag5", "tag6"], + "availability_zone": "az3", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "5b9529a4-6cbf-48f8-a006-d99cbc717da0" + }, + { + "id": "5d26333b-74d1-4b2a-90ab-2b2c0f5a8048" + } + ], + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67", + "name": "lb4", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "created_at": "2024-12-01T13:00:00", + "updated_at": "2024-12-01T13:20:00", + "vip_address": 
"10.0.3.99", + "vip_port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6", + "vip_subnet_id": "47d6f8f9-f7b2-4876-a9d8-4e8f4g95df79", + "vip_network_id": "h04d7f38-f718-4876-d9g8-5d8g5h95df89", + "tags": [], + "availability_zone": "az1", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "84c87596-1ff0-4f6d-b151-0a78e1f407a3" + }, + { + "id": "fe460a7c-16a9-4984-9fe6-f6e5153ebab1" + } + ], + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + }, + { + "id": "e83a6d92-7a3e-4567-94b3-20c83b32a75e", + "name": "lb5", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "created_at": "2024-12-01T11:00:00", + "updated_at": "2024-12-01T11:15:00", + "vip_address": "10.0.4.88", + "vip_port_id": "d83a6d92-7a3e-4567-94b3-20c83b32a75e", + "vip_subnet_id": "25b4d8e5-fe81-4a87-9071-4cc12fb8337f", + "vip_network_id": "f02c5e19-c507-4864-b16e-2b7a39e56be3", + "tags": [], + "availability_zone": "az4", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "50902e62-34b8-46b2-9ed4-9053e7ad46dc" + }, + { + "id": "98a867ad-ff07-4880-b05f-32088866a68a" + } + ], + "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + } + ] +} +` + +// HandleLoadBalancerListSuccessfully mocks the load balancer list API. +func (m *SDMock) HandleLoadBalancerListSuccessfully() { + m.Mux.HandleFunc("/v2.0/lbaas/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, http.MethodGet) + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, lbListBody) + }) +} + +const listenerListBody = ` +{ + "listeners": [ + { + "id": "c4146b54-febc-4caf-a53f-ed1cab6faba5", + "name": "stats-listener", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 9273, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "fcad67a6189847c4aecfa3c81a05783b", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + "updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "5b9529a4-6cbf-48f8-a006-d99cbc717da0", + "name": "stats-listener2", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 8080, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + "updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + 
"timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "84c87596-1ff0-4f6d-b151-0a78e1f407a3", + "name": "stats-listener3", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 9090, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + "updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + }, + { + "id": "50902e62-34b8-46b2-9ed4-9053e7ad46dc", + "name": "stats-listener4", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 9876, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + "updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "e83a6d92-7a3e-4567-94b3-20c83b32a75e" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + }, + { + "id": "a058d20e-82de-4eff-bb65-5c76a8554435", + "name": "port6443", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6443, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + 
"tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + }, + { + "id": "5d26333b-74d1-4b2a-90ab-2b2c0f5a8048", + "name": "port6444", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6444, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "fe460a7c-16a9-4984-9fe6-f6e5153ebab1", + "name": "port6445", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6445, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + }, + { + "id": "98a867ad-ff07-4880-b05f-32088866a68a", + "name": "port6446", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6446, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "e83a6d92-7a3e-4567-94b3-20c83b32a75e" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": 
"a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + } + ] +} +` + +// HandleListenersListSuccessfully mocks the listeners endpoint. +func (m *SDMock) HandleListenersListSuccessfully() { + m.Mux.HandleFunc("/v2.0/lbaas/listeners", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, http.MethodGet) + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, listenerListBody) + }) +} diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index c98f78788d..d7b58787a1 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "time" - "github.com/go-kit/log" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack" "github.com/mwitkow/go-conntrack" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -67,7 +67,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &openstackMetrics{ refreshMetrics: rmi, } @@ -97,6 +97,9 @@ const ( // OpenStack document reference // https://docs.openstack.org/horizon/pike/user/launch-instances.html OpenStackRoleInstance Role = "instance" + // Openstack document reference + // https://docs.openstack.org/openstacksdk/rocky/user/resources/load_balancer/index.html + OpenStackRoleLoadBalancer Role = "loadbalancer" ) // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -105,7 +108,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } switch *c { - case OpenStackRoleHypervisor, OpenStackRoleInstance: + case OpenStackRoleHypervisor, OpenStackRoleInstance, OpenStackRoleLoadBalancer: return nil default: return fmt.Errorf("unknown OpenStack SD role %q", *c) @@ -128,7 +131,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } if c.Role == "" { - return errors.New("role missing (one of: instance, hypervisor)") + return errors.New("role missing (one of: instance, hypervisor, loadbalancer)") } if c.Region == "" { return errors.New("openstack SD configuration requires a region") @@ -142,10 +145,10 @@ type refresher interface { } // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*openstackMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf, l) @@ -163,7 +166,7 @@ func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetr ), nil } -func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { var opts gophercloud.AuthOptions if conf.IdentityEndpoint == "" { var err error @@ -211,6 +214,8 @@ func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { return newHypervisorDiscovery(client, &opts, conf.Port, conf.Region, availability, l), nil case OpenStackRoleInstance: return newInstanceDiscovery(client, &opts, conf.Port, conf.Region, conf.AllTenants, availability, l), nil + case OpenStackRoleLoadBalancer: + return newLoadBalancerDiscovery(client, &opts, conf.Region, availability, l), nil } return nil, errors.New("unknown OpenStack discovery role") } diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index a70857a08b..15bb9809c9 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -16,13 +16,12 @@ package ovhcloud import ( "context" "fmt" + "log/slog" "net/netip" "net/url" "path" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/common/model" @@ -55,10 +54,10 @@ type dedicatedServer struct { type dedicatedServerDiscovery struct { *refresh.Discovery config *SDConfig - logger log.Logger + logger *slog.Logger } -func newDedicatedServerDiscovery(conf *SDConfig, logger log.Logger) *dedicatedServerDiscovery { +func newDedicatedServerDiscovery(conf *SDConfig, logger *slog.Logger) *dedicatedServerDiscovery { return &dedicatedServerDiscovery{config: conf, logger: logger} } @@ -115,10 +114,7 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou for _, dedicatedServerName := range dedicatedServerList { dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName) if err != nil { - err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) - if err != nil { - return nil, err - } + d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) continue } dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer) diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index 52311bcc87..f9dbd6af9c 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -21,8 +21,8 @@ import ( "os" "testing" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) @@ -41,7 +41,7 @@ application_secret: %s consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest) require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) - d, err := newRefresher(&cfg, log.NewNopLogger()) + d, err := newRefresher(&cfg, 
promslog.NewNopLogger()) require.NoError(t, err) ctx := context.Background() targetGroups, err := d.refresh(ctx) diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index 988b4482f2..492bca603a 100644 --- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -17,10 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "net/netip" "time" - "github.com/go-kit/log" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -54,7 +54,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ovhcloudMetrics{ refreshMetrics: rmi, } @@ -137,7 +137,7 @@ func parseIPList(ipList []string) ([]netip.Addr, error) { return ipAddresses, nil } -func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { +func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) { switch conf.Service { case "vps": return newVpsDiscovery(conf, logger), nil @@ -148,10 +148,10 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { } // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ovhcloudMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf, logger) diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index 9c95bf90e6..84a35af3ad 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -20,11 +20,11 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/util/testutil" ) var ( @@ -121,7 +121,7 @@ func TestParseIPs(t *testing.T) { func TestDiscoverer(t *testing.T) { conf, _ := getMockConf("vps") - logger := testutil.NewLogger(t) + logger := promslog.NewNopLogger() reg := prometheus.NewRegistry() refreshMetrics := discovery.NewRefreshMetrics(reg) diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index 58ceeabd87..7050f826a5 100644 --- a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ -16,13 +16,12 @@ package ovhcloud import ( "context" "fmt" + "log/slog" "net/netip" "net/url" "path" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/ovh/go-ovh/ovh" "github.com/prometheus/common/model" @@ -68,10 +67,10 @@ type virtualPrivateServer struct { type vpsDiscovery struct { *refresh.Discovery config *SDConfig - logger log.Logger + logger *slog.Logger } -func newVpsDiscovery(conf *SDConfig, logger log.Logger) *vpsDiscovery { +func newVpsDiscovery(conf *SDConfig, logger *slog.Logger) *vpsDiscovery { return &vpsDiscovery{config: conf, logger: logger} } @@ -133,10 +132,7 @@ func (d *vpsDiscovery) refresh(context.Context) 
([]*targetgroup.Group, error) { for _, vpsName := range vpsList { vpsDetailed, err := getVpsDetails(client, vpsName) if err != nil { - err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) - if err != nil { - return nil, err - } + d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) continue } vpsDetailedList = append(vpsDetailedList, *vpsDetailed) diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index 2d2d6dcd21..d7a2a705c6 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -21,11 +21,10 @@ import ( "os" "testing" - yaml "gopkg.in/yaml.v2" - - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" + yaml "gopkg.in/yaml.v2" ) func TestOvhCloudVpsRefresh(t *testing.T) { @@ -43,7 +42,7 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) - d, err := newRefresher(&cfg, log.NewNopLogger()) + d, err := newRefresher(&cfg, promslog.NewNopLogger()) require.NoError(t, err) ctx := context.Background() targetGroups, err := d.refresh(ctx) diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index 8f89acbf93..e249bc4afa 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -17,8 +17,10 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -27,11 +29,11 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -62,7 +64,7 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) + userAgent = version.PrometheusUserAgent() ) func init() { @@ -80,7 +82,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &puppetdbMetrics{ refreshMetrics: rmi, } @@ -108,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.URL == "" { - return fmt.Errorf("URL is missing") + return errors.New("URL is missing") } parsedURL, err := url.Parse(c.URL) if err != nil { return err } if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { - return fmt.Errorf("URL scheme must be 'http' or 'https'") + return errors.New("URL scheme must be 'http' or 'https'") } if parsedURL.Host == "" { - return fmt.Errorf("host is missing in URL") + return errors.New("host is missing in URL") } if c.Query == "" { - return fmt.Errorf("query missing") + return errors.New("query missing") } return c.HTTPClientConfig.Validate() } @@ -138,14 +140,14 @@ type Discovery struct { } // NewDiscovery returns a new PuppetDB discovery for the given config. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*puppetdbMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http") diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index bf9c7b215e..57e198e131 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -22,10 +22,10 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -70,7 +70,7 @@ func TestPuppetSlashInURL(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) require.Equal(t, apiURL, d.url) @@ -94,7 +94,7 @@ func TestPuppetDBRefresh(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -142,7 +142,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -184,7 +184,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { } func TestPuppetDBInvalidCode(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) })) @@ -201,7 +201,7 @@ func TestPuppetDBInvalidCode(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() @@ -212,7 +212,7 @@ func TestPuppetDBInvalidCode(t *testing.T) { } func TestPuppetDBInvalidFormat(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "{}") })) @@ -229,7 +229,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) { metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) require.NoError(t, metrics.Register()) - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/refresh/refresh.go b/discovery/refresh/refresh.go index f037a90cff..31646c0e4c 100644 --- 
a/discovery/refresh/refresh.go +++ b/discovery/refresh/refresh.go @@ -16,17 +16,17 @@ package refresh import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) type Options struct { - Logger log.Logger + Logger *slog.Logger Mech string Interval time.Duration RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) @@ -35,7 +35,7 @@ type Options struct { // Discovery implements the Discoverer interface. type Discovery struct { - logger log.Logger + logger *slog.Logger interval time.Duration refreshf func(ctx context.Context) ([]*targetgroup.Group, error) metrics *discovery.RefreshMetrics @@ -45,9 +45,9 @@ type Discovery struct { func NewDiscovery(opts Options) *Discovery { m := opts.MetricsInstantiator.Instantiate(opts.Mech) - var logger log.Logger + var logger *slog.Logger if opts.Logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } else { logger = opts.Logger } @@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } } else { select { @@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } continue } diff --git a/discovery/refresh/refresh_test.go b/discovery/refresh/refresh_test.go index b70a326355..7c57d0532a 100644 --- a/discovery/refresh/refresh_test.go +++ b/discovery/refresh/refresh_test.go @@ -15,7 +15,7 @@ package refresh import ( "context" - "fmt" + "errors" "testing" "time" @@ -56,7 +56,7 @@ func TestRefresh(t *testing.T) { } var i int - refresh := func(ctx context.Context) ([]*targetgroup.Group, error) { + refresh := func(_ context.Context) ([]*targetgroup.Group, error) { i++ switch i { case 1: @@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) { case 2: return tg2, nil } - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") } interval := time.Millisecond diff --git a/discovery/registry.go b/discovery/registry.go index 1f491d4ca9..93b88ccfab 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -22,9 +22,8 @@ import ( "strings" "sync" - "gopkg.in/yaml.v2" - "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -267,7 +266,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) { err := rmm.Register() if err != nil { - return nil, fmt.Errorf("failed to create service discovery refresh metrics") + return nil, errors.New("failed to create service discovery refresh metrics") } metrics := make(map[string]DiscovererMetrics) @@ -275,7 +274,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm) err = currentSdMetrics.Register() if err != nil 
{ - return nil, fmt.Errorf("failed to create service discovery metrics") + return nil, errors.New("failed to create service discovery metrics") } metrics[conf.Name()] = currentSdMetrics } diff --git a/discovery/scaleway/baremetal.go b/discovery/scaleway/baremetal.go index c313e6695d..06f13532df 100644 --- a/discovery/scaleway/baremetal.go +++ b/discovery/scaleway/baremetal.go @@ -93,7 +93,7 @@ func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) { Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }), - scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), + scw.WithUserAgent(version.PrometheusUserAgent()), scw.WithProfile(profile), ) if err != nil { diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index 2542c63253..162a75e407 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -35,28 +35,30 @@ import ( const ( instanceLabelPrefix = metaLabelPrefix + "instance_" - instanceBootTypeLabel = instanceLabelPrefix + "boot_type" - instanceHostnameLabel = instanceLabelPrefix + "hostname" - instanceIDLabel = instanceLabelPrefix + "id" - instanceImageArchLabel = instanceLabelPrefix + "image_arch" - instanceImageIDLabel = instanceLabelPrefix + "image_id" - instanceImageNameLabel = instanceLabelPrefix + "image_name" - instanceLocationClusterID = instanceLabelPrefix + "location_cluster_id" - instanceLocationHypervisorID = instanceLabelPrefix + "location_hypervisor_id" - instanceLocationNodeID = instanceLabelPrefix + "location_node_id" - instanceNameLabel = instanceLabelPrefix + "name" - instanceOrganizationLabel = instanceLabelPrefix + "organization_id" - instancePrivateIPv4Label = instanceLabelPrefix + "private_ipv4" - instanceProjectLabel = instanceLabelPrefix + "project_id" - instancePublicIPv4Label = instanceLabelPrefix + "public_ipv4" - instancePublicIPv6Label = instanceLabelPrefix + "public_ipv6" - instanceSecurityGroupIDLabel = instanceLabelPrefix + "security_group_id" - instanceSecurityGroupNameLabel = instanceLabelPrefix + "security_group_name" - instanceStateLabel = instanceLabelPrefix + "status" - instanceTagsLabel = instanceLabelPrefix + "tags" - instanceTypeLabel = instanceLabelPrefix + "type" - instanceZoneLabel = instanceLabelPrefix + "zone" - instanceRegionLabel = instanceLabelPrefix + "region" + instanceBootTypeLabel = instanceLabelPrefix + "boot_type" + instanceHostnameLabel = instanceLabelPrefix + "hostname" + instanceIDLabel = instanceLabelPrefix + "id" + instanceImageArchLabel = instanceLabelPrefix + "image_arch" + instanceImageIDLabel = instanceLabelPrefix + "image_id" + instanceImageNameLabel = instanceLabelPrefix + "image_name" + instanceLocationClusterID = instanceLabelPrefix + "location_cluster_id" + instanceLocationHypervisorID = instanceLabelPrefix + "location_hypervisor_id" + instanceLocationNodeID = instanceLabelPrefix + "location_node_id" + instanceNameLabel = instanceLabelPrefix + "name" + instanceOrganizationLabel = instanceLabelPrefix + "organization_id" + instancePrivateIPv4Label = instanceLabelPrefix + "private_ipv4" + instanceProjectLabel = instanceLabelPrefix + "project_id" + instancePublicIPv4Label = instanceLabelPrefix + "public_ipv4" + instancePublicIPv6Label = instanceLabelPrefix + "public_ipv6" + instancePublicIPv4AddressesLabel = instanceLabelPrefix + "public_ipv4_addresses" + instancePublicIPv6AddressesLabel = instanceLabelPrefix + "public_ipv6_addresses" + instanceSecurityGroupIDLabel = instanceLabelPrefix + "security_group_id" + 
"security_group_id"
instanceSecurityGroupNameLabel = instanceLabelPrefix + "security_group_name" + instanceStateLabel = instanceLabelPrefix + "status" + instanceTagsLabel = instanceLabelPrefix + "tags" + instanceTypeLabel = instanceLabelPrefix + "type" + instanceZoneLabel = instanceLabelPrefix + "zone" + instanceRegionLabel = instanceLabelPrefix + "region" ) type instanceDiscovery struct { @@ -104,7 +106,7 @@ func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) { Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }), - scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), + scw.WithUserAgent(version.PrometheusUserAgent()), scw.WithProfile(profile), ) if err != nil { @@ -175,14 +177,43 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, } addr := "" + if len(server.PublicIPs) > 0 { + var ipv4Addresses []string + var ipv6Addresses []string + + for _, ip := range server.PublicIPs { + switch ip.Family { + case instance.ServerIPIPFamilyInet: + ipv4Addresses = append(ipv4Addresses, ip.Address.String()) + case instance.ServerIPIPFamilyInet6: + ipv6Addresses = append(ipv6Addresses, ip.Address.String()) + } + } + + if len(ipv6Addresses) > 0 { + labels[instancePublicIPv6AddressesLabel] = model.LabelValue( + separator + + strings.Join(ipv6Addresses, separator) + + separator) + } + if len(ipv4Addresses) > 0 { + labels[instancePublicIPv4AddressesLabel] = model.LabelValue( + separator + + strings.Join(ipv4Addresses, separator) + + separator) + } + } + if server.IPv6 != nil { //nolint:staticcheck labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) //nolint:staticcheck addr = server.IPv6.Address.String() //nolint:staticcheck } if server.PublicIP != nil { //nolint:staticcheck - labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck - addr = server.PublicIP.Address.String() //nolint:staticcheck + if server.PublicIP.Family != instance.ServerIPIPFamilyInet6 { //nolint:staticcheck + labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck + } + addr = server.PublicIP.Address.String() //nolint:staticcheck } if server.PrivateIP != nil { diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go index ae70a9ed25..11ef36d353 100644 --- a/discovery/scaleway/instance_test.go +++ b/discovery/scaleway/instance_test.go @@ -60,7 +60,7 @@ api_url: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 3) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { @@ -125,6 +125,8 @@ api_url: %s "__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f", "__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f", "__meta_scaleway_instance_public_ipv4": "51.158.183.115", + "__meta_scaleway_instance_public_ipv4_addresses": ",51.158.183.115,", + "__meta_scaleway_instance_public_ipv6_addresses": ",2001:bc8:1640:1568:dc00:ff:fe21:91b,", "__meta_scaleway_instance_region": "nl-ams", "__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092", "__meta_scaleway_instance_security_group_name": "Default security group", @@ -132,6 +134,30 @@ api_url: %s "__meta_scaleway_instance_type": "DEV1-S", "__meta_scaleway_instance_zone": "nl-ams-1", }, + { + "__address__": "163.172.136.10:80", + "__meta_scaleway_instance_boot_type": "local", + "__meta_scaleway_instance_hostname": "multiple-ips", + 
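+			// The new *_addresses labels below list every public IP of this fixture, joined and wrapped with the "," separator.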
"__meta_scaleway_instance_id": "658abbf4-e6c6-4239-a483-3307763cf6e0", + "__meta_scaleway_instance_image_arch": "x86_64", + "__meta_scaleway_instance_image_id": "f583f58c-1ea5-44ab-a1e6-2b2e7df32a86", + "__meta_scaleway_instance_image_name": "Ubuntu 24.04 Noble Numbat", + "__meta_scaleway_instance_location_cluster_id": "7", + "__meta_scaleway_instance_location_hypervisor_id": "801", + "__meta_scaleway_instance_location_node_id": "95", + "__meta_scaleway_instance_name": "multiple-ips", + "__meta_scaleway_instance_organization_id": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", + "__meta_scaleway_instance_project_id": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", + "__meta_scaleway_instance_public_ipv4": "163.172.136.10", + "__meta_scaleway_instance_public_ipv4_addresses": ",163.172.136.10,212.47.248.223,51.15.231.134,", + "__meta_scaleway_instance_public_ipv6_addresses": ",2001:bc8:710:4a69:dc00:ff:fe58:40c1,2001:bc8:710:d::,2001:bc8:710:5417::,", + "__meta_scaleway_instance_region": "fr-par", + "__meta_scaleway_instance_security_group_id": "0fe819c3-274d-472a-b3f5-ddb258d2d8bb", + "__meta_scaleway_instance_security_group_name": "Default security group", + "__meta_scaleway_instance_status": "running", + "__meta_scaleway_instance_type": "PLAY2-PICO", + "__meta_scaleway_instance_zone": "fr-par-1", + }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index f8e1a83f5e..47ac092000 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -17,12 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "os" "strings" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -105,7 +105,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &scalewayMetrics{ refreshMetrics: rmi, } @@ -185,10 +185,10 @@ func init() { // the Discoverer interface. 
type Discovery struct{} -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*scalewayMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf) diff --git a/discovery/scaleway/testdata/instance.json b/discovery/scaleway/testdata/instance.json index b433f7598e..19a6106daa 100644 --- a/discovery/scaleway/testdata/instance.json +++ b/discovery/scaleway/testdata/instance.json @@ -356,6 +356,222 @@ "placement_group": null, "private_nics": [], "zone": "nl-ams-1" + }, + { + "id": "658abbf4-e6c6-4239-a483-3307763cf6e0", + "name": "multiple-ips", + "arch": "x86_64", + "commercial_type": "PLAY2-PICO", + "boot_type": "local", + "organization": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", + "project": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", + "hostname": "multiple-ips", + "image": { + "id": "f583f58c-1ea5-44ab-a1e6-2b2e7df32a86", + "name": "Ubuntu 24.04 Noble Numbat", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "project": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "root_volume": { + "id": "cfab1e2e-fa24-480a-a372-61b19e8e2fda", + "name": "Ubuntu 24.04 Noble Numbat", + "volume_type": "unified", + "size": 10000000000 + }, + "extra_volumes": { + + }, + "public": true, + "arch": "x86_64", + "creation_date": "2024-04-26T09:24:38.624912+00:00", + "modification_date": "2024-04-26T09:24:38.624912+00:00", + "default_bootscript": null, + "from_server": "", + "state": "available", + "tags": [ + + ], + "zone": "fr-par-1" + }, + "volumes": { + "0": { + "boot": false, + "id": "7d4dc5ae-3f3c-4f9c-91cf-4a47a2193e94", + "name": "Ubuntu 24.04 Noble Numbat", + "volume_type": "b_ssd", + "export_uri": null, + "organization": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", + "project": "ee7bd9e1-9cbd-4724-b2f4-19e50f3cf38b", + "server": { + "id": "658abbf4-e6c6-4239-a483-3307763cf6e0", + "name": "multiple-ips" + }, + "size": 10000000000, + "state": "available", + "creation_date": "2024-06-07T13:33:17.697162+00:00", + "modification_date": "2024-06-07T13:33:17.697162+00:00", + "tags": [ + + ], + "zone": "fr-par-1" + } + }, + "tags": [ + + ], + "state": "running", + "protected": false, + "state_detail": "booted", + "public_ip": { + "id": "63fd9ede-58d7-482c-b21d-1d79d81a90dc", + "address": "163.172.136.10", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [ + + ], + "state": "attached", + "ipam_id": "700644ed-f6a2-4c64-8508-bb867bc07673" + }, + "public_ips": [ + { + "id": "63fd9ede-58d7-482c-b21d-1d79d81a90dc", + "address": "163.172.136.10", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [ + + ], + "state": "attached", + "ipam_id": "700644ed-f6a2-4c64-8508-bb867bc07673" + }, + { + "id": "eed4575b-90e5-4102-b956-df874c911e2b", + "address": "212.47.248.223", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "manual", + "tags": [ + + ], + "state": "attached", + "ipam_id": "e2bdef64-828b-4f4a-a56b-954a85759adf" + }, + { + "id": "fca8b329-6c0e-4f9c-aa88-d5c197b5919a", + "address": "51.15.231.134", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": 
"manual", + "tags": [ + + ], + "state": "attached", + "ipam_id": "e56808db-b348-4b7e-ad23-995ae08dc1a1" + }, + { + "id": "3ffa6774-124c-4e64-8afb-148c15304b25", + "address": "2001:bc8:710:4a69:dc00:ff:fe58:40c1", + "dynamic": false, + "gateway": "fe80::dc00:ff:fe58:40c2", + "netmask": "64", + "family": "inet6", + "provisioning_mode": "slaac", + "tags": [ + + ], + "state": "attached", + "ipam_id": "d97773d9-fd2c-4085-92bb-5c471abd132e" + }, + { + "id": "28fcf539-8492-4603-b627-de0501ce8489", + "address": "2001:bc8:710:d::", + "dynamic": false, + "gateway": "fe80::dc00:ff:fe58:40c2", + "netmask": "64", + "family": "inet6", + "provisioning_mode": "manual", + "tags": [ + + ], + "state": "attached", + "ipam_id": "005d19ac-203c-4034-ab16-9ff4caadbdd5" + }, + { + "id": "db6fafda-3a12-403d-8c9c-1a1cb1c315ba", + "address": "2001:bc8:710:5417::", + "dynamic": false, + "gateway": "fe80::dc00:ff:fe58:40c2", + "netmask": "64", + "family": "inet6", + "provisioning_mode": "manual", + "tags": [ + + ], + "state": "attached", + "ipam_id": "8c62fac8-4134-462c-ba48-71ce4f8ac939" + } + ], + "mac_address": "de:00:00:58:40:c1", + "routed_ip_enabled": true, + "ipv6": null, + "extra_networks": [ + + ], + "dynamic_ip_required": false, + "enable_ipv6": false, + "private_ip": null, + "creation_date": "2024-06-07T13:33:17.697162+00:00", + "modification_date": "2024-06-07T13:33:26.021167+00:00", + "bootscript": { + "id": "fdfe150f-a870-4ce4-b432-9f56b5b995c1", + "public": true, + "title": "x86_64 mainline 4.4.230 rev1", + "architecture": "x86_64", + "organization": "11111111-1111-4111-8111-111111111111", + "project": "11111111-1111-4111-8111-111111111111", + "kernel": "http://10.194.3.9/kernel/x86_64-mainline-lts-4.4-4.4.230-rev1/vmlinuz-4.4.230", + "dtb": "", + "initrd": "http://10.194.3.9/initrd/initrd-Linux-x86_64-v3.14.6.gz", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "zone": "fr-par-1" + }, + "security_group": { + "id": "0fe819c3-274d-472a-b3f5-ddb258d2d8bb", + "name": "Default security group" + }, + "location": { + "zone_id": "par1", + "platform_id": "14", + "cluster_id": "7", + "hypervisor_id": "801", + "node_id": "95" + }, + "maintenances": [ + + ], + "allowed_actions": [ + "poweroff", + "terminate", + "reboot", + "stop_in_place", + "backup" + ], + "placement_group": null, + "private_nics": [ + + ], + "zone": "fr-par-1" } ] } \ No newline at end of file diff --git a/util/logging/ratelimit.go b/discovery/stackit/metrics.go similarity index 54% rename from util/logging/ratelimit.go rename to discovery/stackit/metrics.go index 32d1e249e6..4143b144b7 100644 --- a/util/logging/ratelimit.go +++ b/discovery/stackit/metrics.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,29 +11,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -package logging +package stackit import ( - "github.com/go-kit/log" - "golang.org/x/time/rate" + "github.com/prometheus/prometheus/discovery" ) -type ratelimiter struct { - limiter *rate.Limiter - next log.Logger +var _ discovery.DiscovererMetrics = (*stackitMetrics)(nil) + +type stackitMetrics struct { + refreshMetrics discovery.RefreshMetricsInstantiator } -// RateLimit write to a logger. 
-func RateLimit(next log.Logger, limit rate.Limit) log.Logger { - return &ratelimiter{ - limiter: rate.NewLimiter(limit, int(limit)), - next: next, - } -} - -func (r *ratelimiter) Log(keyvals ...interface{}) error { - if r.limiter.Allow() { - return r.next.Log(keyvals...) - } +// Register implements discovery.DiscovererMetrics. +func (m *stackitMetrics) Register() error { return nil } + +// Unregister implements discovery.DiscovererMetrics. +func (m *stackitMetrics) Unregister() {} diff --git a/discovery/stackit/mock_test.go b/discovery/stackit/mock_test.go new file mode 100644 index 0000000000..59641ce2bc --- /dev/null +++ b/discovery/stackit/mock_test.go @@ -0,0 +1,162 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackit + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +// SDMock is the mock of the STACKIT IAAS API. +type SDMock struct { + t *testing.T + Server *httptest.Server + Mux *http.ServeMux +} + +// NewSDMock returns a new SDMock. +func NewSDMock(t *testing.T) *SDMock { + return &SDMock{ + t: t, + } +} + +// Endpoint returns the URI to the mock server. +func (m *SDMock) Endpoint() string { + return m.Server.URL + "/" +} + +// Setup creates the mock server. +func (m *SDMock) Setup() { + m.Mux = http.NewServeMux() + m.Server = httptest.NewServer(m.Mux) + m.t.Cleanup(m.Server.Close) +} + +// ShutdownServer closes the mock server. +func (m *SDMock) ShutdownServer() { + m.Server.Close() +} + +const ( + testToken = "LRK9DAWQ1ZAEFSrCNEEzLCUwhYX1U3g7wMg4dTlkkDC96fyDuyJ39nVbVjCKSDfj" + testProjectID = "00000000-0000-0000-0000-000000000000" +) + +// HandleServers mocks the STACKIT IAAS API. +func (m *SDMock) HandleServers() { + // /token endpoint mocks the token endpoint for service account authentication. + // It checks if the request body starts with "assertion=ey" to simulate a valid assertion + // as defined in RFC 7523. + m.Mux.HandleFunc("/token", func(w http.ResponseWriter, r *http.Request) { + reqBody, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = fmt.Fprint(w, err) + return + } + + // Expecting an HTTP form-encoded body with the assertion field. + // JWTs always start with "ey" (base64url encoded).
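+ // (Illustration, not part of this change: per RFC 7523 the request body is form-encoded as + // "assertion=<signed JWT>&grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer", and the + // base64url alphabet makes every encoded JWT begin with "ey", so the prefix check below is + // a cheap stand-in for real signature validation.)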
+ if !bytes.HasPrefix(reqBody, []byte("assertion=ey")) { + w.WriteHeader(http.StatusUnauthorized) + return + } + + w.Header().Add("content-type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusOK) + + _, _ = fmt.Fprintf(w, `{"access_token": "%s"}`, testToken) + }) + + m.Mux.HandleFunc(fmt.Sprintf("/v1/projects/%s/servers", testProjectID), func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", testToken) { + w.WriteHeader(http.StatusUnauthorized) + return + } + + w.Header().Add("content-type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusOK) + + _, _ = fmt.Fprint(w, ` +{ + "items": [ + { + "availabilityZone": "eu01-3", + "bootVolume": { + "deleteOnTermination": false, + "id": "1c15e4cc-8474-46be-b875-b473ea9fe80c" + }, + "createdAt": "2025-03-12T14:48:17Z", + "id": "b4176700-596a-4f80-9fc8-5f9c58a606e1", + "labels": { + "provisionSTACKITServerAgent": "true", + "stackit_project_id": "00000000-0000-0000-0000-000000000000" + }, + "launchedAt": "2025-03-12T14:48:52Z", + "machineType": "g1.1", + "name": "runcommandtest", + "nics": [ + { + "ipv4": "10.0.0.153", + "mac": "fa:16:4f:42:1c:d3", + "networkId": "3173494f-2f6c-490d-8c12-4b3c86b4338b", + "networkName": "test", + "publicIp": "192.0.2.1", + "nicId": "b36097c5-e1c5-4e12-ae97-c03e144db127", + "nicSecurity": true, + "securityGroups": [ + "6e60809f-bed3-46c6-a39c-adddd6455674" + ] + } + ], + "powerStatus": "STOPPED", + "serviceAccountMails": [], + "status": "INACTIVE", + "updatedAt": "2025-03-13T07:08:29Z", + "userData": null, + "volumes": [ + "1c15e4cc-8474-46be-b875-b473ea9fe80c" + ] + }, + { + "availabilityZone": "eu01-m", + "bootVolume": { + "deleteOnTermination": false, + "id": "1e3ffe2b-878f-46e5-b39e-372e13a09551" + }, + "createdAt": "2025-04-10T16:45:25Z", + "id": "ee337436-1f15-4647-a03e-154009966179", + "labels": {}, + "launchedAt": "2025-04-10T16:46:00Z", + "machineType": "t1.1", + "name": "server1", + "nics": [], + "powerStatus": "RUNNING", + "serviceAccountMails": [], + "status": "ACTIVE", + "updatedAt": "2025-04-10T16:46:00Z", + "volumes": [ + "1e3ffe2b-878f-46e5-b39e-372e13a09551" + ] + } + ] +}`, + ) + }) +} diff --git a/discovery/stackit/server.go b/discovery/stackit/server.go new file mode 100644 index 0000000000..1be834a689 --- /dev/null +++ b/discovery/stackit/server.go @@ -0,0 +1,222 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package stackit + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/stackitcloud/stackit-sdk-go/core/auth" + stackitconfig "github.com/stackitcloud/stackit-sdk-go/core/config" + + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + stackitAPIEndpoint = "https://iaas.api.%s.stackit.cloud" + + stackitLabelPrivateIPv4 = stackitLabelPrefix + "private_ipv4_" + stackitLabelType = stackitLabelPrefix + "type" + stackitLabelLabel = stackitLabelPrefix + "label_" + stackitLabelLabelPresent = stackitLabelPrefix + "labelpresent_" +) + +// iaasDiscovery periodically performs STACKIT Cloud requests. +// It implements the Discoverer interface. +type iaasDiscovery struct { + *refresh.Discovery + httpClient *http.Client + logger *slog.Logger + apiEndpoint string + project string + port int +} + +// newServerDiscovery returns a new iaasDiscovery, which periodically refreshes its targets. +func newServerDiscovery(conf *SDConfig, logger *slog.Logger) (*iaasDiscovery, error) { + d := &iaasDiscovery{ + project: conf.Project, + port: conf.Port, + apiEndpoint: conf.Endpoint, + logger: logger, + } + + rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "stackit_sd") + if err != nil { + return nil, err + } + + d.apiEndpoint = conf.Endpoint + if d.apiEndpoint == "" { + d.apiEndpoint = fmt.Sprintf(stackitAPIEndpoint, conf.Region) + } + + servers := stackitconfig.ServerConfigurations{stackitconfig.ServerConfiguration{ + URL: d.apiEndpoint, + Description: "STACKIT IAAS API", + }} + + d.httpClient = &http.Client{ + Timeout: time.Duration(conf.RefreshInterval), + Transport: rt, + } + + stackitConfiguration := &stackitconfig.Configuration{ + UserAgent: userAgent, + HTTPClient: d.httpClient, + Servers: servers, + NoAuth: conf.ServiceAccountKey == "" && conf.ServiceAccountKeyPath == "", + + ServiceAccountKey: conf.ServiceAccountKey, + PrivateKey: conf.PrivateKey, + ServiceAccountKeyPath: conf.ServiceAccountKeyPath, + PrivateKeyPath: conf.PrivateKeyPath, + CredentialsFilePath: conf.CredentialsFilePath, + } + + if conf.tokenURL != "" { + stackitConfiguration.TokenCustomUrl = conf.tokenURL + } + + authRoundTripper, err := auth.SetupAuth(stackitConfiguration) + if err != nil { + return nil, fmt.Errorf("setting up authentication: %w", err) + } + + d.httpClient.Transport = authRoundTripper + + return d, nil +} + +func (i *iaasDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + apiURL, err := url.Parse(i.apiEndpoint) + if err != nil { + return nil, fmt.Errorf("invalid API endpoint URL %s: %w", i.apiEndpoint, err) + } + + apiURL.Path, err = url.JoinPath(apiURL.Path, "v1", "projects", i.project, "servers") + if err != nil { + return nil, fmt.Errorf("joining URL path: %w", err) + } + + q := apiURL.Query() + q.Set("details", "true") + apiURL.RawQuery = q.Encode() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, apiURL.String(), nil) + if err != nil { + return nil, fmt.Errorf("creating request: %w", err) + } + + req.Header.Set("Accept", "application/json") + + res, err := i.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("sending request: %w", err) + } + + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + errorMessage, _ := io.ReadAll(res.Body) + + return nil,
fmt.Errorf("unexpected status code %d: %s", res.StatusCode, string(errorMessage)) + } + + var serversResponse *ServerListResponse + + if err := json.NewDecoder(res.Body).Decode(&serversResponse); err != nil { + return nil, fmt.Errorf("decoding response: %w", err) + } + + if serversResponse == nil || serversResponse.Items == nil || len(*serversResponse.Items) == 0 { + return []*targetgroup.Group{{Source: "stackit", Targets: []model.LabelSet{}}}, nil + } + + targets := make([]model.LabelSet, 0, len(*serversResponse.Items)) + for _, server := range *serversResponse.Items { + if server.Nics == nil { + i.logger.Debug("server has no network interfaces. Skipping", slog.String("server_id", server.ID)) + continue + } + + labels := model.LabelSet{ + stackitLabelProject: model.LabelValue(i.project), + stackitLabelID: model.LabelValue(server.ID), + stackitLabelName: model.LabelValue(server.Name), + stackitLabelAvailabilityZone: model.LabelValue(server.AvailabilityZone), + stackitLabelStatus: model.LabelValue(server.Status), + stackitLabelPowerStatus: model.LabelValue(server.PowerStatus), + stackitLabelType: model.LabelValue(server.MachineType), + } + + var ( + addressLabel string + serverPublicIP string + ) + + for _, nic := range server.Nics { + if nic.PublicIP != nil && *nic.PublicIP != "" && serverPublicIP == "" { + serverPublicIP = *nic.PublicIP + addressLabel = serverPublicIP + } + + if nic.IPv4 != nil && *nic.IPv4 != "" { + networkLabel := model.LabelName(stackitLabelPrivateIPv4 + strutil.SanitizeLabelName(nic.NetworkName)) + labels[networkLabel] = model.LabelValue(*nic.IPv4) + if addressLabel == "" { + addressLabel = *nic.IPv4 + } + } + } + + if addressLabel == "" { + // Skip servers without IPs. + continue + } + + // Public IPs for servers are optional. + if serverPublicIP != "" { + labels[stackitLabelPublicIPv4] = model.LabelValue(serverPublicIP) + } + + labels[model.AddressLabel] = model.LabelValue(net.JoinHostPort(addressLabel, strconv.FormatUint(uint64(i.port), 10))) + + for labelKey, labelValue := range server.Labels { + if labelStringValue, ok := labelValue.(string); ok { + presentLabel := model.LabelName(stackitLabelLabelPresent + strutil.SanitizeLabelName(labelKey)) + labels[presentLabel] = "true" + + label := model.LabelName(stackitLabelLabel + strutil.SanitizeLabelName(labelKey)) + labels[label] = model.LabelValue(labelStringValue) + } + } + + targets = append(targets, labels) + } + + return []*targetgroup.Group{{Source: "stackit", Targets: targets}}, nil +} diff --git a/discovery/stackit/server_test.go b/discovery/stackit/server_test.go new file mode 100644 index 0000000000..117fbdd66d --- /dev/null +++ b/discovery/stackit/server_test.go @@ -0,0 +1,131 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package stackit + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" + "github.com/stretchr/testify/require" +) + +type serverSDTestSuite struct { + Mock *SDMock +} + +func (s *serverSDTestSuite) SetupTest(t *testing.T) { + s.Mock = NewSDMock(t) + s.Mock.Setup() + + s.Mock.HandleServers() +} + +func TestServerSDRefresh(t *testing.T) { + for _, tc := range []struct { + name string + cfg SDConfig + }{ + { + name: "default with token", + cfg: func() SDConfig { + cfg := DefaultSDConfig + cfg.HTTPClientConfig.BearerToken = testToken + + return cfg + }(), + }, + { + name: "default with service account key", + cfg: func() SDConfig { + // Generate a new RSA key pair with a size of 2048 bits + key, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + + cfg := DefaultSDConfig + cfg.PrivateKey = string(pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(key), + })) + + cfg.ServiceAccountKey = `{ + "Active": true, + "CreatedAt": "2025-04-05T12:34:56Z", + "Credentials": { + "Aud": "https://stackit-service-account-prod.apps.01.cf.eu01.stackit.cloud", + "Iss": "stackit@sa.stackit.cloud", + "Kid": "123e4567-e89b-12d3-a456-426614174000", + "Sub": "123e4567-e89b-12d3-a456-426614174001" + }, + "ID": "123e4567-e89b-12d3-a456-426614174002", + "KeyAlgorithm": "RSA_2048", + "KeyOrigin": "USER_PROVIDED", + "KeyType": "USER_MANAGED", + "PublicKey": "...", + "ValidUntil": "2025-04-05T13:34:56Z" +}` + + return cfg + }(), + }, + } { + t.Run(tc.name, func(t *testing.T) { + suite := &serverSDTestSuite{} + suite.SetupTest(t) + defer suite.Mock.ShutdownServer() + + tc.cfg.Endpoint = suite.Mock.Endpoint() + tc.cfg.tokenURL = suite.Mock.Endpoint() + "token" + tc.cfg.Project = testProjectID + + d, err := newServerDiscovery(&tc.cfg, promslog.NewNopLogger()) + require.NoError(t, err) + + targetGroups, err := d.refresh(context.Background()) + require.NoError(t, err) + require.Len(t, targetGroups, 1) + + targetGroup := targetGroups[0] + require.NotNil(t, targetGroup, "targetGroup should not be nil") + require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil") + require.Len(t, targetGroup.Targets, 1) + + for i, labelSet := range []model.LabelSet{ + { + "__address__": model.LabelValue("192.0.2.1:80"), + "__meta_stackit_project": model.LabelValue("00000000-0000-0000-0000-000000000000"), + "__meta_stackit_id": model.LabelValue("b4176700-596a-4f80-9fc8-5f9c58a606e1"), + "__meta_stackit_type": model.LabelValue("g1.1"), + "__meta_stackit_private_ipv4_test": model.LabelValue("10.0.0.153"), + "__meta_stackit_public_ipv4": model.LabelValue("192.0.2.1"), + "__meta_stackit_labelpresent_provisionSTACKITServerAgent": model.LabelValue("true"), + "__meta_stackit_label_provisionSTACKITServerAgent": model.LabelValue("true"), + "__meta_stackit_labelpresent_stackit_project_id": model.LabelValue("true"), + "__meta_stackit_name": model.LabelValue("runcommandtest"), + "__meta_stackit_availability_zone": model.LabelValue("eu01-3"), + "__meta_stackit_status": model.LabelValue("INACTIVE"), + "__meta_stackit_power_status": model.LabelValue("STOPPED"), + "__meta_stackit_label_stackit_project_id": model.LabelValue("00000000-0000-0000-0000-000000000000"), + }, + } { + require.Equal(t, labelSet, targetGroup.Targets[i]) + } + }) + } +} diff --git a/discovery/stackit/stackit.go b/discovery/stackit/stackit.go new file mode 100644 index 
0000000000..030f2bdb55 --- /dev/null +++ b/discovery/stackit/stackit.go @@ -0,0 +1,153 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackit + +import ( + "context" + "errors" + "fmt" + "log/slog" + "net/url" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/common/version" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + stackitLabelPrefix = model.MetaLabelPrefix + "stackit_" + stackitLabelProject = stackitLabelPrefix + "project" + stackitLabelID = stackitLabelPrefix + "id" + stackitLabelName = stackitLabelPrefix + "name" + stackitLabelStatus = stackitLabelPrefix + "status" + stackitLabelPowerStatus = stackitLabelPrefix + "power_status" + stackitLabelAvailabilityZone = stackitLabelPrefix + "availability_zone" + stackitLabelPublicIPv4 = stackitLabelPrefix + "public_ipv4" +) + +var userAgent = version.PrometheusUserAgent() + +// DefaultSDConfig is the default STACKIT SD configuration. +var DefaultSDConfig = SDConfig{ + Region: "eu01", + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + HTTPClientConfig: config.DefaultHTTPClientConfig, +} + +func init() { + discovery.RegisterConfig(&SDConfig{}) +} + +// SDConfig is the configuration for STACKIT based service discovery. +type SDConfig struct { + HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` + + Project string `yaml:"project"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + Port int `yaml:"port,omitempty"` + Region string `yaml:"region,omitempty"` + Endpoint string `yaml:"endpoint,omitempty"` + ServiceAccountKey string `yaml:"service_account_key,omitempty"` + PrivateKey string `yaml:"private_key,omitempty"` + ServiceAccountKeyPath string `yaml:"service_account_key_path,omitempty"` + PrivateKeyPath string `yaml:"private_key_path,omitempty"` + CredentialsFilePath string `yaml:"credentials_file_path,omitempty"` + + // For testing only + tokenURL string +} + +// NewDiscovererMetrics implements discovery.Config. +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { + return &stackitMetrics{ + refreshMetrics: rmi, + } +} + +// Name returns the name of the Config. +func (*SDConfig) Name() string { return "stackit" } + +// NewDiscoverer returns a Discoverer for the Config. +func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { + return NewDiscovery(c, opts.Logger, opts.Metrics) +} + +type refresher interface { + refresh(context.Context) ([]*targetgroup.Group, error) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
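Before the unmarshalling logic just below, it helps to see the config in use: combining the SDConfig fields and DefaultSDConfig above, a minimal scrape configuration for the new discoverer might look like this (illustrative values only; the `stackit_sd_configs` key follows from `Name()` above, and both key-file paths are hypothetical):

```yaml
scrape_configs:
  - job_name: stackit
    stackit_sd_configs:
      - project: 00000000-0000-0000-0000-000000000000  # required
        region: eu01                 # default; selects https://iaas.api.eu01.stackit.cloud
        service_account_key_path: /etc/prometheus/stackit-sa-key.json
        private_key_path: /etc/prometheus/stackit-sa.pem
        port: 80                     # default port appended to the target address
```

With neither a service-account key nor a key path set, the client falls back to unauthenticated requests (the `NoAuth` field above), in which case `HTTPClientConfig` credentials such as a bearer token can be used instead, as the discoverer's tests above demonstrate.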
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + + if c.Endpoint == "" && c.Region == "" { + return errors.New("stackit_sd: endpoint and region missing") + } + + if _, err = url.Parse(c.Endpoint); err != nil { + return fmt.Errorf("stackit_sd: invalid endpoint %q: %w", c.Endpoint, err) + } + + return c.HTTPClientConfig.Validate() +} + +// SetDirectory joins any relative file paths with dir. +func (c *SDConfig) SetDirectory(dir string) { + c.HTTPClientConfig.SetDirectory(dir) +} + +// Discovery periodically performs STACKIT API requests. It implements +// the Discoverer interface. +type Discovery struct { + *refresh.Discovery +} + +// NewDiscovery returns a new Discovery which periodically refreshes its targets. +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { + m, ok := metrics.(*stackitMetrics) + if !ok { + return nil, errors.New("invalid discovery metrics type") + } + + r, err := newRefresher(conf, logger) + if err != nil { + return nil, err + } + + return refresh.NewDiscovery( + refresh.Options{ + Logger: logger, + Mech: "stackit", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + MetricsInstantiator: m.refreshMetrics, + }, + ), nil +} + +func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { + return newServerDiscovery(conf, l) +} diff --git a/discovery/stackit/types.go b/discovery/stackit/types.go new file mode 100644 index 0000000000..66681c3455 --- /dev/null +++ b/discovery/stackit/types.go @@ -0,0 +1,38 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackit + +// ServerListResponse is the response object for the server list request. +// https://docs.api.eu01.stackit.cloud/documentation/iaas/version/v1#tag/Servers/operation/v1ListServersInProject +type ServerListResponse struct { + Items *[]Server `json:"items"` +} + +type Server struct { + AvailabilityZone string `json:"availabilityZone"` + ID string `json:"id"` + Labels map[string]interface{} `json:"labels"` + MachineType string `json:"machineType"` + Name string `json:"name"` + Nics []ServerNetwork `json:"nics"` + PowerStatus string `json:"powerStatus"` + Status string `json:"status"` +} + +// ServerNetwork describes how a server is attached to its networks.
+type ServerNetwork struct { + NetworkName string `json:"networkName"` + IPv4 *string `json:"ipv4,omitempty"` + PublicIP *string `json:"publicIp,omitempty"` +} diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 675149f2a3..5efe49e23d 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -19,12 +19,12 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strings" "time" - "github.com/go-kit/log" "github.com/mwitkow/go-conntrack" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -71,7 +71,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &tritonMetrics{ refreshMetrics: rmi, } @@ -146,10 +146,10 @@ type Discovery struct { } // New returns a new Discovery which periodically refreshes its targets. -func New(logger log.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*tritonMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } tls, err := config.NewTLSConfig(&conf.TLSConfig) diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index e37693e6bf..b0dccbf898 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -21,7 +21,6 @@ import ( "net/http/httptest" "net/url" "strconv" - "strings" "testing" "github.com/prometheus/client_golang/prometheus" @@ -182,8 +181,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { td, m, _ := newTritonDiscovery(conf) _, err := td.refresh(context.Background()) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint")) + require.ErrorContains(t, err, "an error occurred when requesting targets from the discovery endpoint") m.Unregister() } @@ -193,8 +191,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := td.refresh(ctx) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) + require.ErrorContains(t, err, context.Canceled.Error()) m.Unregister() } @@ -233,7 +230,7 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) { func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet { var ( td, m, _ = newTritonDiscovery(c) - s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, dstr) })) ) diff --git a/discovery/util.go b/discovery/util.go index 83cc640dd9..4e2a088518 100644 --- a/discovery/util.go +++ b/discovery/util.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// A utility to be used by implementations of discovery.Discoverer -// which need to manage the lifetime of their metrics. +// MetricRegisterer is used by implementations of discovery.Discoverer that need +// to manage the lifetime of their metrics. 
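The reworded comment introduces the interface that discoverers use to tie metric registration to their lifecycle; within this PR the concrete pattern is visible in discovery/xds/metrics.go further down. Condensed into a stand-alone sketch (hypothetical names, same shape):

```go
package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

// mySDMetrics follows the shape of xdsMetrics: the collectors live on the
// struct, and a MetricRegisterer manages their (un)registration.
type mySDMetrics struct {
	fetchFailuresCount prometheus.Counter
	metricRegisterer   discovery.MetricRegisterer
}

func newMySDMetrics(reg prometheus.Registerer) *mySDMetrics {
	m := &mySDMetrics{
		fetchFailuresCount: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "my_sd_fetch_failures_total",
			Help: "Number of failed target fetches.",
		}),
	}
	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
		m.fetchFailuresCount,
	})
	return m
}

// Register and Unregister satisfy discovery.DiscovererMetrics by delegating
// to the MetricRegisterer.
func (m *mySDMetrics) Register() error { return m.metricRegisterer.RegisterMetrics() }
func (m *mySDMetrics) Unregister()     { m.metricRegisterer.UnregisterMetrics() }
```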
type MetricRegisterer interface { RegisterMetrics() error UnregisterMetrics() @@ -34,7 +34,7 @@ type metricRegistererImpl struct { var _ MetricRegisterer = &metricRegistererImpl{} -// Creates an instance of a MetricRegisterer. +// NewMetricRegisterer creates an instance of a MetricRegisterer. // Typically called inside the implementation of the NewDiscoverer() method. func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { return &metricRegistererImpl{ diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index c8af2f1587..a7745eed46 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "net/url" "path" @@ -24,7 +25,6 @@ import ( "strings" "time" - "github.com/go-kit/log" "github.com/kolo/xmlrpc" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -41,10 +41,10 @@ const ( uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_" uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname" uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn" - uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id" - uyuniLablelGroups = uyuniMetaLabelPrefix + "groups" - uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" - uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter" + uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id" + uyuniLabelGroups = uyuniMetaLabelPrefix + "groups" + uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" + uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter" uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module" uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path" uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme" @@ -109,11 +109,11 @@ type Discovery struct { entitlement string separator string interval time.Duration - logger log.Logger + logger *slog.Logger } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &uyuniMetrics{ refreshMetrics: rmi, } @@ -141,18 +141,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.Server == "" { + //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires server host") } _, err = url.Parse(c.Server) if err != nil { + //nolint:staticcheck // Capitalized first word. return fmt.Errorf("Uyuni Server URL is not valid: %w", err) } if c.Username == "" { + //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires a username") } if c.Password == "" { + //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires a password") } return c.HTTPClientConfig.Validate() @@ -205,17 +209,14 @@ func getEndpointInfoForSystems( err := rpcclient.Call( "system.monitoring.listEndpoints", []interface{}{token, systemIDs}, &endpointInfos) - if err != nil { - return nil, err - } return endpointInfos, err } // NewDiscovery returns a uyuni discovery for the given configuration. 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*uyuniMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } apiURL, err := url.Parse(conf.Server) @@ -270,10 +271,10 @@ func (d *Discovery) getEndpointLabels( model.AddressLabel: model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), - uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), - uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), - uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName), - uyuniLablelExporter: model.LabelValue(endpoint.ExporterName), + uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), + uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), + uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName), + uyuniLabelExporter: model.LabelValue(endpoint.ExporterName), uyuniLabelProxyModule: model.LabelValue(endpoint.Module), uyuniLabelMetricsPath: model.LabelValue(endpoint.Path), uyuniLabelScheme: model.LabelValue(scheme), diff --git a/discovery/uyuni/uyuni_test.go b/discovery/uyuni/uyuni_test.go index 09be23e2b4..46567587a8 100644 --- a/discovery/uyuni/uyuni_test.go +++ b/discovery/uyuni/uyuni_test.go @@ -21,9 +21,8 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -59,7 +58,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err func TestUyuniSDHandleError(t *testing.T) { var ( errTesting = "unable to login to Uyuni API: request error: bad status code - 500" - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) @@ -75,7 +74,7 @@ func TestUyuniSDLogin(t *testing.T) { var ( errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500" call = 0 - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/xml") switch call { case 0: @@ -106,7 +105,7 @@ func TestUyuniSDLogin(t *testing.T) { func TestUyuniSDSkipLogin(t *testing.T) { var ( errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500" - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index aaa9c64e47..0ab477438b 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -15,14 +15,14 @@ package vultr import ( "context" - "fmt" + "errors" + "log/slog" "net" "net/http" "strconv" "strings" "time" - "github.com/go-kit/log" 
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -75,7 +75,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &vultrMetrics{ refreshMetrics: rmi, } @@ -114,10 +114,10 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*vultrMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ @@ -134,11 +134,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere Timeout: time.Duration(conf.RefreshInterval), }) - d.client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)) - - if err != nil { - return nil, fmt.Errorf("error setting up vultr agent: %w", err) - } + d.client.SetUserAgent(version.PrometheusUserAgent()) d.Discovery = refresh.NewDiscovery( refresh.Options{ diff --git a/discovery/vultr/vultr_test.go b/discovery/vultr/vultr_test.go index 2f12a35529..00ef21e38c 100644 --- a/discovery/vultr/vultr_test.go +++ b/discovery/vultr/vultr_test.go @@ -19,9 +19,9 @@ import ( "net/url" "testing" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/discovery" @@ -57,7 +57,7 @@ func TestVultrSDRefresh(t *testing.T) { defer metrics.Unregister() defer refreshMetrics.Unregister() - d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) + d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics) require.NoError(t, err) endpoint, err := url.Parse(sdMock.Mock.Endpoint()) require.NoError(t, err) diff --git a/discovery/xds/client.go b/discovery/xds/client.go index 027ceb2715..a27e060fbd 100644 --- a/discovery/xds/client.go +++ b/discovery/xds/client.go @@ -30,7 +30,7 @@ import ( "github.com/prometheus/common/version" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() // ResourceClient exposes the xDS protocol for a single resource type. // See https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#rest-json-polling-subscriptions . 
diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index b699995fb7..bf0e53b348 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -52,16 +52,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Equal(t, "invalid xDS server URL", err.Error()) + require.EqualError(t, err, "invalid xDS server URL") } func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.Error(t, err) - require.Contains(t, err.Error(), "must be either 'http' or 'https'") + require.ErrorContains(t, err, "must be either 'http' or 'https'") } func TestMakeXDSResourceHttpEndpoint(t *testing.T) { @@ -108,7 +106,7 @@ func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, } func TestHTTPResourceClientFetchEmptyResponse(t *testing.T) { - client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(_ *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { return nil, nil }) defer cleanup() @@ -148,7 +146,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) { } func TestHTTPResourceClientServerError(t *testing.T) { - client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(_ *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { return nil, errors.New("server error") }) defer cleanup() diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index d1d540aaf4..6208e6182a 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -14,15 +14,16 @@ package xds import ( + "errors" "fmt" + "log/slog" "net/url" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "google.golang.org/protobuf/types/known/anypb" "github.com/prometheus/prometheus/discovery" @@ -99,7 +100,7 @@ func (c *KumaSDConfig) SetDirectory(dir string) { func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { logger := opts.Logger if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return NewKumaHTTPDiscovery(c, logger, opts.Metrics) @@ -158,10 +159,10 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L return targets, nil } -func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { +func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { m, ok := metrics.(*xdsMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } // Default to "prometheus" if hostname is unavailable. 
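The hunk that follows shows the recurring go-kit-to-slog rewrite at its simplest: level.Debug(logger).Log("msg", ..., "err", err) becomes logger.Debug(msg, key, value, ...). As a runnable illustration of the new call convention (standard library only):

```go
package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	// A debug-level text handler, comparable to what promslog would configure.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))

	// Message first, then alternating key/value pairs; no more explicit "msg" key.
	logger.Debug("error getting FQDN", "err", errors.New("lookup failed"))
}
```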
@@ -170,7 +171,7 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discove var err error clientID, err = osutil.GetFQDN() if err != nil { - level.Debug(logger).Log("msg", "error getting FQDN", "err", err) + logger.Debug("error getting FQDN", "err", err) clientID = "prometheus" } } diff --git a/discovery/xds/kuma_mads.pb.go b/discovery/xds/kuma_mads.pb.go index b1079bf23f..210a5343a4 100644 --- a/discovery/xds/kuma_mads.pb.go +++ b/discovery/xds/kuma_mads.pb.go @@ -23,13 +23,14 @@ package xds import ( context "context" + reflect "reflect" + sync "sync" + v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" ) const ( diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index cfb9cbac50..23d754c4b7 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -201,9 +201,8 @@ func TestKumaMadsV1ResourceParserInvalidResources(t *testing.T) { }} groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL) require.Nil(t, groups) - require.Error(t, err) - require.Contains(t, err.Error(), "cannot parse") + require.ErrorContains(t, err, "cannot parse") } func TestNewKumaHTTPDiscovery(t *testing.T) { diff --git a/discovery/xds/metrics.go b/discovery/xds/metrics.go index 597d516566..bdc9598f2c 100644 --- a/discovery/xds/metrics.go +++ b/discovery/xds/metrics.go @@ -29,7 +29,7 @@ type xdsMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &xdsMetrics{ fetchFailuresCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/discovery/xds/xds.go b/discovery/xds/xds.go index 8191d6be1a..db55a2b6f7 100644 --- a/discovery/xds/xds.go +++ b/discovery/xds/xds.go @@ -15,11 +15,10 @@ package xds import ( "context" + "log/slog" "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "google.golang.org/protobuf/encoding/protojson" @@ -104,7 +103,7 @@ type fetchDiscovery struct { refreshInterval time.Duration parseResources resourceParser - logger log.Logger + logger *slog.Logger metrics *xdsMetrics } @@ -140,7 +139,7 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou } if err != nil { - level.Error(d.logger).Log("msg", "error parsing resources", "err", err) + d.logger.Error("error parsing resources", "err", err) d.metrics.fetchFailuresCount.Inc() return } @@ -153,12 +152,12 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl) if err != nil { - level.Error(d.logger).Log("msg", "error parsing resources", "err", err) + d.logger.Error("error parsing resources", "err", err) d.metrics.fetchFailuresCount.Inc() return } - level.Debug(d.logger).Log("msg", "Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets)) + d.logger.Debug("Updated to version", "version", response.VersionInfo, 
"targets", len(parsedTargets)) select { case <-ctx.Done(): diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go index 7cce021c5f..af2784bcb2 100644 --- a/discovery/xds/xds_test.go +++ b/discovery/xds/xds_test.go @@ -22,9 +22,9 @@ import ( "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/goleak" "google.golang.org/protobuf/types/known/anypb" @@ -85,12 +85,12 @@ func createTestHTTPServer(t *testing.T, responder discoveryResponder) *httptest. } func constantResourceParser(targets []model.LabelSet, err error) resourceParser { - return func(resources []*anypb.Any, typeUrl string) ([]model.LabelSet, error) { + return func(_ []*anypb.Any, _ string) ([]model.LabelSet, error) { return targets, err } } -var nopLogger = log.NewNopLogger() +var nopLogger = promslog.NewNopLogger() type testResourceClient struct { resourceTypeURL string @@ -120,7 +120,7 @@ func (rc testResourceClient) Close() { func TestPollingRefreshSkipUpdate(t *testing.T) { rc := &testResourceClient{ - fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) { + fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) { return nil, nil }, } @@ -167,7 +167,7 @@ func TestPollingRefreshAttachesGroupMetadata(t *testing.T) { rc := &testResourceClient{ server: server, protocolVersion: ProtocolV3, - fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) { + fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) { return &v3.DiscoveryResponse{}, nil }, } @@ -223,14 +223,14 @@ func TestPollingDisappearingTargets(t *testing.T) { rc := &testResourceClient{ server: server, protocolVersion: ProtocolV3, - fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) { + fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) { return &v3.DiscoveryResponse{}, nil }, } // On the first poll, send back two targets. On the next, send just one. counter := 0 - parser := func(resources []*anypb.Any, typeUrl string) ([]model.LabelSet, error) { + parser := func(_ []*anypb.Any, _ string) ([]model.LabelSet, error) { counter++ if counter == 1 { return []model.LabelSet{ diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 92904dd71c..af26cc5a0e 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -18,15 +18,16 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net" "strconv" "strings" "time" - "github.com/go-kit/log" "github.com/go-zookeeper/zk" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -58,7 +59,7 @@ type ServersetSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*ServersetSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*ServersetSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } @@ -100,7 +101,7 @@ type NerveSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*NerveSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*NerveSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } @@ -146,16 +147,16 @@ type Discovery struct { treeCaches []*treecache.ZookeeperTreeCache parse func(data []byte, path string) (model.LabelSet, error) - logger log.Logger + logger *slog.Logger } // NewNerveDiscovery returns a new Discovery for the given Nerve config. -func NewNerveDiscovery(conf *NerveSDConfig, logger log.Logger) (*Discovery, error) { +func NewNerveDiscovery(conf *NerveSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember) } // NewServersetDiscovery returns a new Discovery for the given serverset config. -func NewServersetDiscovery(conf *ServersetSDConfig, logger log.Logger) (*Discovery, error) { +func NewServersetDiscovery(conf *ServersetSDConfig, logger *slog.Logger) (*Discovery, error) { return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember) } @@ -165,11 +166,11 @@ func NewDiscovery( srvs []string, timeout time.Duration, paths []string, - logger log.Logger, + logger *slog.Logger, pf func(data []byte, path string) (model.LabelSet, error), ) (*Discovery, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } conn, _, err := zk.Connect( diff --git a/discovery/zookeeper/zookeeper_test.go b/discovery/zookeeper/zookeeper_test.go index c2b41ce7a3..e496dfef51 100644 --- a/discovery/zookeeper/zookeeper_test.go +++ b/discovery/zookeeper/zookeeper_test.go @@ -26,11 +26,13 @@ func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } +// TestNewDiscoveryError can fail if the DNS resolver mistakenly resolves the domain below. +// See https://github.com/prometheus/prometheus/issues/16191 for a precedent. func TestNewDiscoveryError(t *testing.T) { _, err := NewDiscovery( - []string{"unreachable.test"}, + []string{"unreachable.invalid"}, time.Second, []string{"/"}, nil, - func(data []byte, path string) (model.LabelSet, error) { return nil, nil }) + func(_ []byte, _ string) (model.LabelSet, error) { return nil, nil }) require.Error(t, err) } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 2faf65105e..e90a7574ba 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -2,12 +2,9 @@ title: prometheus --- -# prometheus - The Prometheus monitoring server - ## Flags | Flag | Description | Default | @@ -15,11 +12,15 @@ The Prometheus monitoring server | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | | --version | Show application version. | | | --config.file | Prometheus configuration file path. | `prometheus.yml` | -| --web.listen-address | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` | +| --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | +| --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. 
| `0.0.0.0:9090` | +| --auto-gomaxprocs | Automatically set GOMAXPROCS to match Linux container CPU quota | `true` | +| --auto-gomemlimit | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | -| --web.max-connections | Maximum number of simultaneous connections. | `512` | +| --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` | +| --web.max-notifications-subscribers | Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close. | `16` | | --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | --web.user-assets | Path to static asset directory, available at /user. | | @@ -27,18 +28,18 @@ The Prometheus monitoring server | --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` | | --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` | | --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` | +| --web.enable-otlp-receiver | Enable API endpoint accepting OTLP write requests. | `false` | | --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` | | --web.console.libraries | Path to the console library directory. | `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | | --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` | | --storage.tsdb.path | Base path for metrics storage. Use with server mode only. | `data/` | -| --storage.tsdb.retention | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | | -| --storage.tsdb.retention.time | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | +| --storage.tsdb.retention.time | How long to retain samples in storage. If neither this flag nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. 
| | | --storage.tsdb.retention.size | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | | | --storage.tsdb.no-lockfile | Do not create lockfile in data directory. Use with server mode only. | `false` | | --storage.tsdb.head-chunks-write-queue-size | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` | | --storage.agent.path | Base path for metrics storage. Use with agent mode only. | `data-agent/` | -| --storage.agent.wal-compression | Compress the agent WAL. Use with agent mode only. | `true` | +| --storage.agent.wal-compression | Compress the agent WAL. If false, the --storage.agent.wal-compression-type flag is ignored. Use with agent mode only. | `true` | | --storage.agent.retention.min-time | Minimum age samples may be before being considered for deletion when the WAL is truncated Use with agent mode only. | | | --storage.agent.retention.max-time | Maximum age samples may be before being forcibly deleted when the WAL is truncated Use with agent mode only. | | | --storage.agent.no-lockfile | Do not create lockfile in data directory. Use with agent mode only. | `false` | @@ -51,12 +52,14 @@ The Prometheus monitoring server | --rules.alert.resend-delay | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` | | --rules.max-concurrent-evals | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` | | --alertmanager.notification-queue-capacity | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` | +| --alertmanager.notification-batch-size | The maximum number of notifications per batch to send to the Alertmanager. Use with server mode only. | `256` | | --alertmanager.drain-notification-queue-on-shutdown | Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down. Use with server mode only. | `true` | | --query.lookback-delta | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` | | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... 
| Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 443cd3f0cb..92e0ac0030 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -2,12 +2,9 @@ title: promtool --- -# promtool - Tooling for the Prometheus monitoring system. - ## Flags | Flag | Description | @@ -15,7 +12,7 @@ Tooling for the Prometheus monitoring system. | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | | --experimental | Enable experimental commands. | -| --enable-feature | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | +| --enable-feature ... | Comma separated feature names to enable. Valid options: promql-experimental-functions, promql-delayed-name-removal. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details | @@ -59,9 +56,10 @@ Check the resources for validity. #### Flags -| Flag | Description | -| --- | --- | -| --extended | Print extended information related to the cardinality of the metrics. | +| Flag | Description | Default | +| --- | --- | --- | +| --query.lookback-delta | The server's maximum query lookback duration. | `5m` | +| --extended | Print extended information related to the cardinality of the metrics. | | @@ -102,8 +100,9 @@ Check if the config files are valid or not. | Flag | Description | Default | | --- | --- | --- | | --syntax-only | Only check the config file syntax, ignoring file and content validation referenced in the config | | -| --lint | Linting checks to apply to the rules specified in the config. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` | +| --lint | Linting checks to apply to the rules/scrape configs specified in the config. Available options are: all, duplicate-rules, none, too-long-scrape-interval. Use --lint=none to disable linting | `duplicate-rules` | | --lint-fatal | Make lint errors exit with exit code 3. | `false` | +| --ignore-unknown-fields | Ignore unknown fields in the rule groups read by the config files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` | | --agent | Check config file for Prometheus in Agent mode. | | @@ -177,6 +176,7 @@ Check if the rule files are valid or not. | --- | --- | --- | | --lint | Linting checks to apply. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` | | --lint-fatal | Make lint errors exit with exit code 3. 
| `false` | +| --ignore-unknown-fields | Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` | @@ -281,7 +281,7 @@ Run series query. | Flag | Description | | --- | --- | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | @@ -309,7 +309,7 @@ Run labels query. | --- | --- | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | @@ -338,7 +338,7 @@ Run queries against your Prometheus to analyze the usage pattern of certain metr | --type | Type of metric: histogram. | | | --duration | Time frame to analyze. | `1h` | | --time | Query time (RFC3339 or Unix timestamp), defaults to now. | | -| --match | Series selector. Can be specified multiple times. | | +| --match ... | Series selector. Can be specified multiple times. | | @@ -442,6 +442,15 @@ Unit testing. +#### Flags + +| Flag | Description | +| --- | --- | +| --junit | File path to store JUnit XML test results. | + + + + ##### `promtool test rules` Unit tests for rules. @@ -452,8 +461,10 @@ Unit tests for rules. | Flag | Description | Default | | --- | --- | --- | -| --run | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | +| --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | +| --debug | Enable unit test debugging. | `false` | | --diff | [Experimental] Print colored differential output between expected & received output. | `false` | +| --ignore-unknown-fields | Ignore unknown fields in the test files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` | @@ -566,10 +577,10 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | -| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | -| --min-time | Minimum timestamp to dump. | `-9223372036854775808` | -| --max-time | Maximum timestamp to dump. | `9223372036854775807` | -| --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | +| --sandbox-dir-root | Root directory where a sandbox directory will be created; this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | +| --min-time | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` | +| --max-time | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` | +| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -593,10 +604,10 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | -| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks.
The sandbox directory is cleaned up at the end. | `data/` | -| --min-time | Minimum timestamp to dump. | `-9223372036854775808` | -| --max-time | Maximum timestamp to dump. | `9223372036854775807` | -| --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | +| --sandbox-dir-root | Root directory where a sandbox directory will be created; this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | +| --min-time | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` | +| --max-time | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` | +| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -632,6 +643,15 @@ Import samples from OpenMetrics input and produce TSDB blocks. Please refer to t +###### Flags + +| Flag | Description | +| --- | --- | +| --label | Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value | + + + + ###### Arguments | Argument | Description | Default | Required | diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md index 3c1ec84f0f..faffad56f2 100644 --- a/docs/configuration/alerting_rules.md +++ b/docs/configuration/alerting_rules.md @@ -3,15 +3,13 @@ title: Alerting rules sort_rank: 3 --- -# Alerting rules - Alerting rules allow you to define alert conditions based on Prometheus expression language expressions and to send notifications about firing alerts to an external service. Whenever the alert expression results in one or more vector elements at a given point in time, the alert counts as active for these elements' label sets. -### Defining alerting rules +## Defining alerting rules Alerting rules are configured in Prometheus in the same way as [recording rules](recording_rules.md). @@ -21,10 +19,13 @@ An example rules file with an alert would be: ```yaml groups: - name: example + labels: + team: myteam rules: - alert: HighRequestLatency expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 for: 10m + keep_firing_for: 5m labels: severity: page annotations: @@ -38,13 +39,20 @@ the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state. Alerting rules without the `for` clause will become active on the first evaluation. +There is also an optional `keep_firing_for` clause that tells Prometheus to keep +this alert firing for the specified duration after the firing condition was last met. +This can be used to prevent situations such as flapping alerts, false resolutions +due to lack of data, etc. Alerting rules without the `keep_firing_for` clause +will deactivate on the first evaluation where the condition is not met (assuming +any optional `for` duration described above has been satisfied). + The `labels` clause allows specifying a set of additional labels to be attached to the alert. Any existing conflicting labels will be overwritten. The label values can be templated. The `annotations` clause specifies a set of informational labels that can be used to store longer additional information such as alert descriptions or runbook links. The annotation values can be templated. -#### Templating +### Templating Label and annotation values can be templated using [console templates](https://prometheus.io/docs/visualization/consoles).
The `$labels` @@ -83,7 +91,7 @@ groups: description: "{{ $labels.instance }} has a median request latency above 1s (current value: {{ $value }}s)" ``` -### Inspecting alerts during runtime +## Inspecting alerts during runtime To manually inspect which alerts are active (pending or firing), navigate to the "Alerts" tab of your Prometheus instance. This will show you the exact @@ -95,7 +103,7 @@ The sample value is set to `1` as long as the alert is in the indicated active (pending or firing) state, and the series is marked stale when this is no longer the case. -### Sending alert notifications +## Sending alert notifications Prometheus's alerting rules are good at figuring what is broken *right now*, but they are not a fully-fledged notification solution. Another layer is needed to @@ -104,6 +112,6 @@ on top of the simple alert definitions. In Prometheus's ecosystem, the [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) takes on this role. Thus, Prometheus may be configured to periodically send information about alert states to an Alertmanager instance, which then takes care of dispatching -the right notifications. +the right notifications. Prometheus can be [configured](configuration.md) to automatically discover available Alertmanager instances through its service discovery integrations. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 35976871b9..45f099af4e 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -3,8 +3,6 @@ title: Configuration sort_rank: 1 --- -# Configuration - Prometheus is configured via command-line flags and a configuration file. While the command-line flags configure immutable system parameters (such as storage locations, amount of data to keep on disk and in memory, etc.), the @@ -59,6 +57,7 @@ global: [ scrape_interval: | default = 1m ] # How long until a scrape request times out. + # It cannot be greater than the scrape interval. [ scrape_timeout: | default = 10s ] # The protocols to negotiate during a scrape with the client. @@ -70,13 +69,20 @@ global: # How frequently to evaluate rules. [ evaluation_interval: | default = 1m ] - - # Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received. - # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there's anomalies with scraping. + + # Offset the rule evaluation timestamp of this particular group by the + # specified duration into the past to ensure the underlying metrics have + # been received. Metric availability delays are more likely to occur when + # Prometheus is running as a remote write target, but can also occur when + # there's anomalies with scraping. [ rule_query_offset: | default = 0s ] # The labels to add to any time series or alerts when communicating with # external systems (federation, remote storage, Alertmanager). + # Environment variable references `${var}` or `$var` are replaced according + # to the values of the current environment variables. + # References to undefined variables are replaced by the empty string. + # The `$` character can be escaped by using `$$`. external_labels: [ : ... ] @@ -84,33 +90,39 @@ global: # Reloading the configuration will reopen the file. [ query_log_file: ] + # File to which scrape failures are logged. + # Reloading the configuration will reopen the file. 
+ [ scrape_failure_log_file: ] + # An uncompressed response body larger than this many bytes will cause the # scrape to fail. 0 means no limit. Example: 100MB. # This is an experimental feature, this behaviour could # change or be removed in the future. [ body_size_limit: | default = 0 ] - # Per-scrape limit on number of scraped samples that will be accepted. + # Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] - # Per-scrape limit on number of labels that will be accepted for a sample. If - # more than this number of labels are present post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the number of labels that will be accepted per sample. If more + # than this number of labels are present on any sample post metric-relabeling, + # the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] - # Per-scrape limit on length of labels name that will be accepted for a sample. - # If a label name is longer than this number post metric-relabeling, the entire - # scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label name. If any label + # name in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label names are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] - # Per-scrape limit on length of labels value that will be accepted for a sample. - # If a label value is longer than this number post metric-relabeling, the - # entire scrape will be treated as failed. 0 means no limit. + # Limit on the length (in bytes) of each individual label value. If any label + # value in a scrape is longer than this number post metric-relabeling, the + # entire scrape will be treated as failed. Note that label values are UTF-8 + # encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] - # Per-scrape config limit on number of unique targets that will be + # Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could @@ -121,6 +133,20 @@ global: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] + # Specifies the validation scheme for metric and label names. Either blank or + # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, + # and underscores. + [ metric_name_validation_scheme: | default = "utf8" ] + + # Specifies whether to convert all scraped classic histograms into native + # histograms with custom buckets. + [ convert_classic_histograms_to_nhcb: | default = false ] + + # Specifies whether to scrape a classic histogram, even if it is also exposed as a native + # histogram (has no effect without --enable-feature=native-histograms). + [ always_scrape_classic_histograms: | default = false ] + + runtime: # Configure the Go garbage collector GOGC parameter # See: https://tip.golang.org/doc/gc-guide#GOGC @@ -152,6 +178,46 @@ alerting: remote_write: [ - ... ] +# Settings related to the OTLP receiver feature.
+# See https://prometheus.io/docs/guides/opentelemetry/ for best practices. +otlp: + # Promote a specific list of resource attributes to labels. + # It cannot be configured simultaneously with 'promote_all_resource_attributes: true'. + [ promote_resource_attributes: [, ...] | default = [ ] ] + # Promote all resource attributes to labels, except for the ones configured with 'ignore_resource_attributes'. + # Be aware that changes in attributes received by the OTLP endpoint may result in time series churn and lead to high memory usage by the Prometheus server. + # It cannot be set to 'true' simultaneously with 'promote_resource_attributes'. + [ promote_all_resource_attributes: | default = false ] + # Which resource attributes to ignore. Can only be set when 'promote_all_resource_attributes' is true. + [ ignore_resource_attributes: [, ...] | default = [] ] + # Configures translation of OTLP metrics when received through the OTLP metrics + # endpoint. Available values: + # - "UnderscoreEscapingWithSuffixes" refers to commonly agreed normalization used + # by OpenTelemetry in https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus + # - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus. + # It preserves all special characters like dots, but still adds required metric name suffixes + # for units and _total, as UnderscoreEscapingWithSuffixes does. + # - (EXPERIMENTAL) "NoTranslation" is a mode that relies on UTF-8 support in Prometheus. + # It preserves all special characters like dots and won't append special suffixes for metric + # unit and type. + # + # WARNING: The "NoTranslation" setting has significant known risks and limitations (see https://prometheus.io/docs/practices/naming/ + # for details): + # * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboards, autoscaling configuration). + # * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed + # time series. For instance, you may end up in a situation of ingesting `foo.bar` series with unit + # `seconds` and a separate series `foo.bar` with unit `milliseconds`. + [ translation_strategy: | default = "UnderscoreEscapingWithSuffixes" ] + # Enables adding "service.name", "service.namespace" and "service.instance.id" + # resource attributes to the "target_info" metric, on top of converting + # them into the "instance" and "job" labels. + [ keep_identifying_resource_attributes: | default = false ] + # Configures optional translation of OTLP explicit bucket histograms into native histograms with custom buckets. + [ convert_histograms_to_nhcb: | default = false ] + # Enables promotion of OTel scope metadata (i.e. name, version, schema URL, and attributes) to metric labels. + # This is disabled by default for backwards compatibility, but according to OTel spec, scope metadata _should_ be identifying, i.e. translated to metric labels. + [ promote_scope_metadata: | default = false ] + # Settings related to the remote read feature. remote_read: [ - ... ] @@ -186,16 +252,24 @@ job_name: [ scrape_interval: | default = ] # Per-scrape timeout when scraping this job. +# It cannot be greater than the scrape interval. [ scrape_timeout: | default = ] # The protocols to negotiate during a scrape with the client. # Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, -# OpenMetricsText1.0.0, PrometheusText0.0.4. +# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
[ scrape_protocols: [, ...] | default = ] -# Whether to scrape a classic histogram that is also exposed as a native +# Fallback protocol to use if a scrape returns blank, unparseable, or otherwise +# invalid Content-Type. +# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, +# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0. +[ fallback_scrape_protocol: ] + +# Whether to scrape a classic histogram, even if it is also exposed as a native # histogram (has no effect without --enable-feature=native-histograms). -[ scrape_classic_histograms: | default = false ] +[ always_scrape_classic_histograms: | +default = ] # The HTTP resource path on which to fetch metrics from targets. [ metrics_path: | default = /metrics ] @@ -251,53 +325,13 @@ params: # response from the scraped target. [ enable_compression: | default = true ] -# Sets the `Authorization` header on every scrape request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Sets the `Authorization` header on every scrape request with -# the configured credentials. -authorization: - # Sets the authentication type of the request. - [ type: | default: Bearer ] - # Sets the credentials of the request. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials of the request with the credentials read from the - # configured file. It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether scrape requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# Configures the scrape request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] +# File to which scrape failures are logged. +# Reloading the configuration will reopen the file. +[ scrape_failure_log_file: ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] # List of Azure service discovery configurations. azure_sd_configs: @@ -400,6 +434,10 @@ scaleway_sd_configs: serverset_sd_configs: [ - ... ] +# List of STACKIT service discovery configurations. +stackit_sd_configs: + [ - ... ] + # List of Triton service discovery configurations. triton_sd_configs: [ - ... ] @@ -426,48 +464,73 @@ metric_relabel_configs: # change or be removed in the future. [ body_size_limit: | default = 0 ] -# Per-scrape limit on number of scraped samples that will be accepted. +# Per-scrape limit on the number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: | default = 0 ] -# Per-scrape limit on number of labels that will be accepted for a sample. 
If -# more than this number of labels are present post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the number of labels that will be accepted per sample. If more +# than this number of labels are present on any sample post metric-relabeling, +# the entire scrape will be treated as failed. 0 means no limit. [ label_limit: | default = 0 ] -# Per-scrape limit on length of labels name that will be accepted for a sample. -# If a label name is longer than this number post metric-relabeling, the entire -# scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label name. If any label +# name in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label names are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_name_length_limit: | default = 0 ] -# Per-scrape limit on length of labels value that will be accepted for a sample. -# If a label value is longer than this number post metric-relabeling, the -# entire scrape will be treated as failed. 0 means no limit. +# Limit on the length (in bytes) of each individual label value. If any label +# value in a scrape is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. Note that label values are UTF-8 +# encoded, and characters can take up to 4 bytes. 0 means no limit. [ label_value_length_limit: | default = 0 ] -# Per-scrape config limit on number of unique targets that will be +# Limit per scrape config on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. # 0 means no limit. This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] -# Per-job limit on the number of targets dropped by relabeling +# Limit per scrape config on the number of targets dropped by relabeling # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] +# Specifies the validation scheme for metric and label names. Either blank or +# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and +# underscores. +[ metric_name_validation_scheme: | default = "utf8" ] + +# Specifies the character escaping scheme that will be requested when scraping +# for metric and label names that do not conform to the legacy Prometheus +# character set. Available options are: +# * `allow-utf-8`: Full UTF-8 support, no escaping needed. +# * `underscores`: Escape all legacy-invalid characters to underscores. +# * `dots`: Escapes dots to `_dot_`, underscores to `__`, and all other +# legacy-invalid characters to underscores. +# * `values`: Prepend the name with `U__` and replace all invalid +# characters with their unicode value, surrounded by underscores. Single +# underscores are replaced with double underscores. +# e.g. "U__my_2e_dotted_2e_name". +# If this value is left blank, Prometheus will default to `allow-utf-8` if the +# validation scheme for the current scrape config is set to utf8, or +# `underscores` if the validation scheme is set to `legacy`. +[ metric_name_escaping_scheme: | default = "allow-utf-8" ] + # Limit on total number of positive and negative buckets allowed in a single -# native histogram.
The resolution of a histogram with more buckets will be +# reduced until the number of buckets is within the limit. If the limit cannot +# be reached, the scrape will fail. +# 0 means no limit. [ native_histogram_bucket_limit: | default = 0 ] # Lower limit for the growth factor of one bucket to the next in each native # histogram. The resolution of a histogram with a lower growth factor will be -# reduced until it is within the limit. +# reduced as much as possible until it is within the limit. # To set an upper limit for the schema (equivalent to "scale" in OTel's # exponential histograms), use the following factor limits: -# +# # +----------------------------+----------------------------+ # | growth factor | resulting schema AKA scale | # +----------------------------+----------------------------+ @@ -497,14 +560,86 @@ metric_relabel_configs: # +----------------------------+----------------------------+ # | 1.002 | 8 | # +----------------------------+----------------------------+ -# +# # 0 results in the smallest supported factor (which is currently ~1.0027 or # schema 8, but might change in the future). [ native_histogram_min_bucket_factor: | default = 0 ] + +# Specifies whether to convert classic histograms into native histograms with +# custom buckets (has no effect without --enable-feature=native-histograms). +[ convert_classic_histograms_to_nhcb: | default = +] ``` Where `` must be unique across all scrape configurations. +### `` + +A `http_config` allows configuring HTTP requests. + +```yaml +# Sets the `Authorization` header on every request with the +# configured username and password. +# username and username_file are mutually exclusive. +# password and password_file are mutually exclusive. +basic_auth: + [ username: ] + [ username_file: ] + [ password: ] + [ password_file: ] + +# Sets the `Authorization` header on every request with +# the configured credentials. +authorization: + # Sets the authentication type of the request. + [ type: | default: Bearer ] + # Sets the credentials of the request. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials of the request with the credentials read from the + # configured file. It is mutually exclusive with `credentials`. + [ credentials_file: ] + +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + +# Configure whether requests follow HTTP 3xx redirects. +[ follow_redirects: | default = true ] + +# Whether to enable HTTP2. +[ enable_http2: | default: true ] + +# Configures the request's TLS settings. +tls_config: + [ ] + +# Optional proxy URL. +[ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] +``` + ### `` A `tls_config` allows configuring TLS connections. 
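A minimal sketch of how the HTTP client settings documented above, including the `tls_config` sub-block, are typically combined in a scrape job. The job name, file paths, and server name below are placeholders, not values from this patch:

```yaml
scrape_configs:
  - job_name: "secure-app"                             # placeholder job name
    scheme: https
    basic_auth:
      username: prometheus
      password_file: /etc/prometheus/secrets/password  # placeholder path
    tls_config:
      ca_file: /etc/prometheus/ca.crt                  # placeholder path
      server_name: app.internal.example                # placeholder; must match the server certificate
```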
@@ -543,7 +678,7 @@ A `tls_config` allows configuring TLS connections. ### `` -OAuth 2.0 authentication using the client credentials grant type. +OAuth 2.0 authentication using the client credentials or password grant type. Prometheus fetches an access token from the specified endpoint with the given client access and secret keys. @@ -563,6 +698,11 @@ scopes: token_url: # Optional parameters to append to the token URL. +# To set 'password' grant type, add it to params: +# endpoint_params: +# grant_type: 'password' +# username: 'username@example.com' +# password: 'strongpassword' endpoint_params: [ : ... ] @@ -581,12 +721,31 @@ tls_config: # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] ``` ### `` Azure SD configurations allow retrieving scrape targets from Azure VMs. +The discovery requires at least the following permissions: + +* `Microsoft.Compute/virtualMachines/read`: Required for VM discovery +* `Microsoft.Network/networkInterfaces/read`: Required for VM discovery +* `Microsoft.Compute/virtualMachineScaleSets/virtualMachines/read`: Required for scale set (VMSS) discovery +* `Microsoft.Compute/virtualMachineScaleSets/virtualMachines/networkInterfaces/read`: Required for scale set (VMSS) discovery + The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_azure_machine_id`: the machine ID @@ -634,53 +793,9 @@ subscription_id: # instead be specified in the relabeling rule. [ port: | default = 80 ] -# Authentication information used to authenticate to the Azure API. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information, currently not support by Azure. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by Azure. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by Azure. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. 
-tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -693,7 +808,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_consul_address`: the address of the target * `__meta_consul_dc`: the datacenter name for the target * `__meta_consul_health`: the health status of the service -* `__meta_consul_partition`: the admin partition name where the service is registered +* `__meta_consul_partition`: the admin partition name where the service is registered * `__meta_consul_metadata_`: each node metadata key value of the target * `__meta_consul_node`: the node name defined for the target * `__meta_consul_service_address`: the service address of the target @@ -726,14 +841,17 @@ The following meta labels are available on targets during [relabeling](#relabel_ services: [ - ] -# See https://www.consul.io/api/catalog.html#list-nodes-for-service to know more -# about the possible filters that can be used. +# A Consul Filter expression used to filter the catalog results +# See https://www.consul.io/api-docs/catalog#list-services to know more +# about the filter expressions that can be used. +[ filter: ] +# The `tags` and `node_meta` fields are deprecated in Consul in favor of `filter`. # An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. tags: [ - ] -# Node metadata key/value pairs to filter nodes for a given service. +# Node metadata key/value pairs to filter nodes for a given service. As of Consul 1.14, consider `filter` instead. [ node_meta: [ : ... ] ] @@ -747,53 +865,9 @@ tags: # On large setup it might be a good idea to increase this value because the catalog will change all the time. [ refresh_interval: | default = 30s ] -# Authentication information used to authenticate to the consul server. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. 
+[ ] ``` Note that the IP number and port used to scrape the targets is assembled as @@ -833,60 +907,15 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_digitalocean_vpc`: the id of the droplet's VPC ```yaml -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information, not currently supported by DigitalOcean. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] # The time after which the droplets are refreshed. [ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -918,22 +947,6 @@ See below for the configuration options for Docker discovery: # Address of the Docker daemon. host: -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from, when `role` is nodes, and for discovered # tasks and services that don't have published ports. [ port: | default = 80 ] @@ -941,7 +954,9 @@ tls_config: # The host to use if the container is in host networking mode. [ host_networking_host: | default = "localhost" ] -# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets. +# Sort all non-nil networks in ascending order based on network name and +# get the first network if the container has multiple networks defined, +# thus avoiding collecting duplicate targets. 
[ match_first_network: | default = true ] # Optional filters to limit the discovery process to a subset of available @@ -955,39 +970,9 @@ tls_config: # The time after which the containers are refreshed. [ refresh_interval: | default = 60s ] -# Authentication information used to authenticate to the Docker daemon. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful @@ -1096,22 +1081,6 @@ See below for the configuration options for Docker Swarm discovery: # Address of the Docker daemon. host: -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# TLS configuration. -tls_config: - [ ] - # Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`. role: @@ -1132,39 +1101,9 @@ role: # The time after which the service discovery data is refreshed. [ refresh_interval: | default = 60s ] -# Authentication information used to authenticate to the Docker daemon. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. 
+[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful @@ -1279,53 +1218,9 @@ filters: [ - name: values: , [...] ] -# Authentication information used to authenticate to the EC2 API. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information, currently not supported by AWS. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by AWS. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by AWS. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful @@ -1374,6 +1269,25 @@ The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_openstack_tag_`: each metadata item of the instance, with any unsupported characters converted to an underscore. * `__meta_openstack_user_id`: the user account owning the tenant. +#### `loadbalancer` + +The `loadbalancer` role discovers one target per Octavia load balancer with a +`PROMETHEUS` listener. The target address defaults to the VIP address +of the load balancer. A minimal configuration sketch follows the label list below. + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_openstack_loadbalancer_availability_zone`: the availability zone of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_floating_ip`: the floating IP of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_id`: the OpenStack load balancer ID. +* `__meta_openstack_loadbalancer_name`: the OpenStack load balancer name. +* `__meta_openstack_loadbalancer_provider`: the Octavia provider of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_operating_status`: the operating status of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_provisioning_status`: the provisioning status of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_tags`: comma separated list of the tags of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_vip`: the VIP of the OpenStack load balancer. +* `__meta_openstack_project_id`: the project (tenant) owning this load balancer.
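A minimal sketch of a scrape job using the new `loadbalancer` role. The job name and region are placeholders, and authentication options (documented below) are omitted for brevity:

```yaml
scrape_configs:
  - job_name: "openstack-octavia"   # placeholder job name
    openstack_sd_configs:
      - role: loadbalancer
        region: RegionOne           # placeholder region
    relabel_configs:
      # Expose the load balancer name as a regular label on the scraped series.
      - source_labels: [__meta_openstack_loadbalancer_name]
        target_label: loadbalancer
```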
+ See below for the configuration options for OpenStack discovery: ```yaml @@ -1554,51 +1468,9 @@ query: # The port to scrape metrics from. [ port: | default = 80 ] -# TLS configuration to connect to the PuppetDB. -tls_config: - [ ] - -# basic_auth, authorization, and oauth2, are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# `Authorization` HTTP header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials with the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml) @@ -1612,7 +1484,7 @@ and serves as an interface to plug in custom service discovery mechanisms. It reads a set of files containing a list of zero or more ``s. Changes to all defined files are detected via disk watches -and applied immediately. +and applied immediately. While those individual files are watched for changes, the parent directory is also watched implicitly. This is to handle [atomic @@ -1779,62 +1651,19 @@ The labels below are only available for targets with `role` set to `robot`: # One of robot or hcloud. role: -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information, required when role is robot -# Role hcloud does not support basic auth. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, required when role is -# hcloud. Role robot does not support bearer token authentication. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. 
IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] # The time after which the servers are refreshed. [ refresh_interval: | default = 60s ] + +# Label selector used to filter the servers when fetching them from the API. See https://docs.hetzner.cloud/#label-selector for more details. +# Only used when role is hcloud. +[ label_selector: ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -1876,53 +1705,9 @@ url: # Refresh interval to re-query the endpoint. [ refresh_interval: | default = 60s ] -# Authentication information used to authenticate to the API server. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -1956,62 +1741,15 @@ following meta labels are available on all targets during # The unique ID of the data center. datacenter_id: -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information, required when using IONOS -# Cloud username and password as authentication method. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, required when using IONOS -# Cloud token as authentication method. -authorization: - # Sets the authentication type. 
- [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] # The time after which the servers are refreshed. [ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2103,6 +1841,9 @@ The `endpoints` role discovers targets from listed endpoints of a service. For e address one target is discovered per port. If the endpoint is backed by a pod, all additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well. +Note that the Endpoints API is [deprecated in Kubernetes v1.33+](https://kubernetes.io/blog/2025/04/24/endpoints-deprecation/); +it is recommended to use EndpointSlices instead and switch to the `endpointslice` role below. + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. @@ -2129,6 +1870,8 @@ The `endpointslice` role discovers targets from existing endpointslices. For eac address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well. +The role requires the `discovery.k8s.io/v1` API version (available since Kubernetes v1.21). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the endpoints object. @@ -2149,7 +1892,7 @@ Available meta labels: * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation. * `__meta_kubernetes_endpointslice_endpoint_hostname`: Hostname of the referenced endpoint. * `__meta_kubernetes_endpointslice_endpoint_node_name`: Name of the Node hosting the referenced endpoint. - * `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in (only available when using the `discovery.k8s.io/v1` API group). + * `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in. * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint. * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
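A minimal sketch of a scrape job using the `endpointslice` role together with the meta labels listed above. The job name and the port name `metrics` are placeholders:

```yaml
scrape_configs:
  - job_name: "kubernetes-endpointslices"   # placeholder job name
    kubernetes_sd_configs:
      - role: endpointslice
    relabel_configs:
      # Keep only targets whose endpoint port is named "metrics".
      - source_labels: [__meta_kubernetes_endpointslice_port_name]
        action: keep
        regex: metrics
```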
@@ -2162,6 +1905,8 @@ The `ingress` role discovers a target for each path of each ingress. This is generally useful for blackbox monitoring of an ingress. The address will be set to the host specified in the ingress spec. +The role requires the `networking.k8s.io/v1` API version (available since Kubernetes v1.19). + Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the ingress object. @@ -2193,54 +1938,6 @@ role: # Note that api_server and kube_config are mutually exclusive. [ kubeconfig_file: ] -# Optional authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # Optional namespace discovery. If omitted, all namespaces are used. namespaces: own_namespace: @@ -2270,6 +1967,10 @@ attach_metadata: # Attaches node metadata to discovered targets. Valid for roles: pod, endpoints, endpointslice. # When set to true, Prometheus must have permissions to get Nodes. [ node: | default = false ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml) @@ -2282,7 +1983,7 @@ which automates the Prometheus setup on top of Kubernetes. Kuma SD configurations allow retrieving scrape target from the [Kuma](https://kuma.io) control plane. -This SD discovers "monitoring assignments" based on Kuma [Dataplane Proxies](https://kuma.io/docs/latest/documentation/dps-and-data-model), +This SD discovers "monitoring assignments" based on Kuma [Dataplane Proxies](https://kuma.io/docs/latest/production/dp-config/dpp/#data-plane-proxy), via the MADS v1 (Monitoring Assignment Discovery Service) xDS API, and will create a target for each proxy inside a Prometheus-enabled mesh. @@ -2299,7 +2000,7 @@ See below for the configuration options for Kuma MonitoringAssignment discovery: # Address of the Kuma Control Plane's MADS xDS server. server: -# Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend. 
+# Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend. # This is useful when migrating between multiple Prometheus backends, or having separate backend for each Mesh. # When not specified, system hostname/fqdn will be used if available, if not `prometheus` will be used. [ client_id: ] @@ -2310,54 +2011,9 @@ server: # The time after which the monitoring assignments are refreshed. [ fetch_timeout: | default = 2m ] -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# TLS configuration. -tls_config: - [ ] - -# Authentication information used to authenticate to the Docker daemon. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional the `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials with the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful way @@ -2411,53 +2067,9 @@ See below for the configuration options for Lightsail discovery: # instead be specified in the relabeling rule. [ port: | default = 80 ] -# Authentication information used to authenticate to the Lightsail API. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information, currently not supported by AWS. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by AWS. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by AWS. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. 
-[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2468,6 +2080,8 @@ This service discovery uses the public IPv4 address by default, by that can be changed with relabeling, as demonstrated in [the Prometheus linode-sd configuration file](/documentation/examples/prometheus-linode.yml). +The Linode APIv4 token must be created with the scopes `linodes:read_only`, `ips:read_only`, and `events:read_only`. + The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_linode_instance_id`: the id of the linode instance @@ -2484,7 +2098,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_linode_status`: the status of the linode instance * `__meta_linode_tags`: a list of tags of the linode instance joined by the tag separator * `__meta_linode_group`: the display group a linode instance is a member of -* `__meta_linode_gpus`: the number of GPU's of the linode instance +* `__meta_linode_gpus`: the number of GPUs of the linode instance * `__meta_linode_hypervisor`: the virtualization software powering the linode instance * `__meta_linode_backups`: the backup service status of the linode instance * `__meta_linode_specs_disk_bytes`: the amount of storage space the linode instance has access to @@ -2495,59 +2109,10 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_linode_ipv6_ranges`: a list of IPv6 ranges with mask assigned to the linode instance joined by the tag separator ```yaml -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. -# Note: Linode APIv4 Token must be created with scopes: 'linodes:read_only', 'ips:read_only', and 'events:read_only' - -# Optional HTTP basic authentication information, not currently supported by Linode APIv4. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional the `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials with the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] # Optional region to filter on. [ region: ] -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers.
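+# For example, an illustrative sketch (not part of the reference above; the
+# token file path is an assumption): a Linode job that passes the API token
+# through the generic HTTP client settings could look like:
+#
+#   linode_sd_configs:
+#     - authorization:
+#         credentials_file: /etc/prometheus/linode-token
+#       region: us-east
+#       port: 9100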
-[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] @@ -2556,6 +2121,10 @@ tls_config: # The time after which the linode instances are refreshed. [ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2596,55 +2165,9 @@ servers: # It is mutually exclusive with `auth_token` and other authentication mechanisms. [ auth_token_file: ] -# Sets the `Authorization` header on every request with the -# configured username and password. -# This is mutually exclusive with other authentication mechanisms. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -# NOTE: The current version of DC/OS marathon (v1.11.0) does not support -# standard `Authentication` header, use `auth_token` or `auth_token_file` -# instead. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration for connecting to marathon servers -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` By default every app listed in Marathon will be scraped by Prometheus. If not all @@ -2703,56 +2226,13 @@ The following meta labels are available on targets during [relabeling](#relabel_ [ namespace: | default = default ] [ refresh_interval: | default = 60s ] [ region: | default = global ] -[ server: ] +# The URL to connect to the API. +[ server: ] [ tag_separator: | default = ,] -# Authentication information used to authenticate to the nomad server. -# Note that `basic_auth`, `authorization` and `oauth2` options are -# mutually exclusive. -# `password` and `password_file` are mutually exclusive. - -# Optional HTTP basic authentication information. 
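+# For example, an illustrative sketch (not part of the reference above; the
+# server URL assumes Nomad's standard HTTP port):
+#
+#   nomad_sd_configs:
+#     - server: http://localhost:4646
+#       namespace: default
+#       refresh_interval: 60s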
-basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -2785,6 +2265,70 @@ paths: Serverset data must be in the JSON format, the Thrift format is not currently supported. +### `` + +[STACKIT](https://www.stackit.de/de/) SD configurations allow retrieving +scrape targets from the STACKIT Cloud API. + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_stackit_availability_zone`: The availability zone of the server. +* `__meta_stackit_label_`: Each server label, with unsupported characters replaced by underscores. +* `__meta_stackit_labelpresent_`: "true" for each label of the server, with unsupported characters replaced by underscores. +* `__meta_stackit_private_ipv4_`: The private IPv4 address of the server within a given network. +* `__meta_stackit_public_ipv4`: The public IPv4 address of the server. +* `__meta_stackit_id`: The ID of the target. +* `__meta_stackit_type`: The type or brand of the target. +* `__meta_stackit_name`: The server name. +* `__meta_stackit_status`: The current status of the server. +* `__meta_stackit_power_status`: The power status of the server. + +See below for the configuration options for STACKIT discovery: + +```yaml +# The STACKIT project. +project: + +# STACKIT region to use. No automatic discovery of the region is done. +[ region: | default = "eu01" ] + +# Custom API endpoint to be used. Format: scheme://host:port. +[ endpoint: ] + +# The port to scrape metrics from. +[ port: | default = 80 ] + +# Raw private key string used for authenticating a service account. +[ private_key: ] + +# Path to a file containing the raw private key string. +[ private_key_path: ] + +# Full JSON-formatted service account key used for authentication. +[ service_account_key: ] + +# Path to a file containing the JSON-formatted service account key. +[ service_account_key_path: ] + +# Path to a file containing STACKIT credentials. +[ credentials_file_path: ] + +# The time after which the servers are refreshed. +[ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc.
+[ ] +``` + +A Service Account Token can be set through `http_config`: + +```yaml +stackit_sd_configs: +- authorization: + credentials: +``` + ### `` [Triton](https://github.com/joyent/triton) SD configurations allow retrieving @@ -2890,54 +2434,12 @@ See below for the configuration options for Eureka discovery: # The URL to connect to the Eureka server. server: -# Sets the `Authorization` header on every request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configures the scrape request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - # Refresh interval to re-read the app instance list. [ refresh_interval: | default = 30s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [the Prometheus eureka-sd configuration file](/documentation/examples/prometheus-eureka.yml) @@ -2968,6 +2470,8 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_scaleway_instance_project_id`: project id of the server * `__meta_scaleway_instance_public_ipv4`: the public IPv4 address of the server * `__meta_scaleway_instance_public_ipv6`: the public IPv6 address of the server +* `__meta_scaleway_instance_public_ipv4_addresses`: the public IPv4 addresses of the server +* `__meta_scaleway_instance_public_ipv6_addresses`: the public IPv6 addresses of the server * `__meta_scaleway_instance_region`: the region of the server * `__meta_scaleway_instance_security_group_id`: the ID of the security group of the server * `__meta_scaleway_instance_security_group_name`: the name of the security group of the server @@ -3038,27 +2542,9 @@ tags_filter: # Refresh interval to re-read the targets list. [ refresh_interval: | default = 60s ] -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers.
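+# For example, an illustrative sketch (not part of the reference above; the
+# credentials shown are placeholders): discovering instance targets in a
+# project might look like:
+#
+#   scaleway_sd_configs:
+#     - project_id: 11111111-1111-1111-1111-111111111111
+#       role: instance
+#       access_key: SCWXXXXXXXXXXXXXXXXX
+#       secret_key: 11111111-1111-1111-1111-111111111111
+#       tags_filter:
+#         - monitored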
-[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` ### `` @@ -3098,49 +2584,9 @@ password: # Refresh interval to re-read the managed targets list. [ refresh_interval: | default = 60s ] -# Optional HTTP basic authentication information, currently not supported by Uyuni. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration, currently not supported by Uyuni. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration, currently not supported by Uyuni. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` See [the Prometheus uyuni-sd configuration file](/documentation/examples/prometheus-uyuni.yml) @@ -3175,60 +2621,15 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_vultr_instance_allowed_bandwidth_gb` : Monthly bandwidth quota in GB. ```yaml -# Authentication information used to authenticate to the API server. -# Note that `basic_auth` and `authorization` options are -# mutually exclusive. -# password and password_file are mutually exclusive. - -# Optional HTTP basic authentication information, not currently supported by Vultr. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. 
IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - -# TLS configuration. -tls_config: - [ ] - # The port to scrape metrics from. [ port: | default = 80 ] # The time after which the instances are refreshed. [ refresh_interval: | default = 60s ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` @@ -3259,12 +2660,16 @@ Initially, aside from the configured per-target labels, a target's `job` label is set to the `job_name` value of the respective scrape configuration. The `__address__` label is set to the `:` address of the target. After relabeling, the `instance` label is set to the value of `__address__` by default if -it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels -are set to the scheme and metrics path of the target respectively. The `__param_` -label is set to the value of the first passed URL parameter called ``. +it was not set during relabeling. + +The `__scheme__` and `__metrics_path__` labels +are set to the scheme and metrics path of the target respectively, as specified in `scrape_config`. + +The `__param_` +label is set to the value of the first passed URL parameter called ``, as defined in `scrape_config`. The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's -interval and timeout. +interval and timeout, as specified in `scrape_config`. Additional labels prefixed with `__meta_` may be available during the relabeling phase. They are set by the service discovery mechanism that provided @@ -3278,7 +2683,8 @@ input to a subsequent relabeling step), use the `__tmp` label name prefix. This prefix is guaranteed to never be used by Prometheus itself. ```yaml -# The source labels select values from existing labels. Their content is concatenated +# The source_labels tells the rule what labels to fetch from the series. Any +# labels which do not exist get a blank value (""). Their content is concatenated # using the configured separator and matched against the configured regular expression # for the replace, keep, and drop actions. [ source_labels: '[' [, ...] ']' ] @@ -3376,27 +2782,8 @@ through the `__alerts_path__` label. # Configures the protocol scheme used for requests. [ scheme: | default = http ] -# Sets the `Authorization` header on every request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optionally configures AWS's Signature Verification 4 signing process to -# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2. 
+# Optionally configures AWS's Signature Version 4 signing process to sign requests. +# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread, or google_iam. # To use the default credentials from the AWS SDK, use `sigv4: {}`. sigv4: # The AWS region. If blank, the region from the default credentials chain # is used. [ region: ] @@ -3414,32 +2801,9 @@ sigv4: # AWS Role ARN, an alternative to using AWS API keys. [ role_arn: ] -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configures the scrape request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] # List of Azure service discovery configurations. azure_sd_configs: @@ -3537,6 +2901,10 @@ scaleway_sd_configs: serverset_sd_configs: [ - ... ] +# List of STACKIT service discovery configurations. +stackit_sd_configs: + [ - ... ] + # List of Triton service discovery configurations. triton_sd_configs: [ - ... ] @@ -3610,24 +2978,11 @@ write_relabel_configs: # For the `io.prometheus.write.v2.Request` message, this option is noop (always true). [ send_native_histograms: | default = false ] -# Sets the `Authorization` header on every remote write request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default = Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] +# When enabled, remote-write will resolve the URL host name via DNS, choose one of the IP addresses at random, and connect to it. +# When disabled, remote-write relies on Go's standard behavior, which is to try to connect to each address in turn. +# The connection timeout applies to the whole operation, i.e. in the latter case it is spread over all attempts. +# This is an experimental feature, and its behavior might still change, or even get removed. +[ round_robin_dns: | default = false ] # Optionally configures AWS's Signature Verification 4 signing process to # sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread. @@ -3648,20 +3003,15 @@ sigv4: # AWS Role ARN, an alternative to using AWS API keys. [ role_arn: ] -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread. -oauth2: - [ ] - # Optional AzureAD configuration.
-# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4. +# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam. azuread: # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. [ cloud: | default = AzurePublic ] - # Azure User-assigned Managed identity. + # Azure Managed Identity. Leave 'client_id' blank to use the default managed identity. [ managed_identity: - [ client_id: ] ] + [ client_id: ] ] # Azure OAuth. [ oauth: @@ -3674,27 +3024,13 @@ azuread: [ sdk: [ tenant_id: ] ] -# Configures the remote write request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. -[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default = false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default = true ] +# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use. +# Optional Google Cloud Monitoring configuration. +# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread. +# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`. +google_iam: + # Service account key with monitoring write permissions. + credentials_file: # Configures the queue used to write to remote storage. queue_config: @@ -3737,6 +3073,11 @@ metadata_config: [ send_interval: | default = 1m ] # Maximum number of samples per send. [ max_samples_per_send: | default = 500] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +# enable_http2 defaults to false for remote-write. +[ ] ``` There is a list of @@ -3771,54 +3112,12 @@ headers: # the local storage should have complete data for. [ read_recent: | default = false ] -# Sets the `Authorization` header on every remote read request with the -# configured username and password. -# password and password_file are mutually exclusive. -basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# Optional `Authorization` header configuration. -authorization: - # Sets the authentication type. - [ type: | default: Bearer ] - # Sets the credentials. It is mutually exclusive with - # `credentials_file`. - [ credentials: ] - # Sets the credentials to the credentials read from the configured file. - # It is mutually exclusive with `credentials`. - [ credentials_file: ] - -# Optional OAuth 2.0 configuration. -# Cannot be used at the same time as basic_auth or authorization. -oauth2: - [ ] - -# Configures the remote read request's TLS settings. -tls_config: - [ ] - -# Optional proxy URL. -[ proxy_url: ] -# Comma-separated string that can contain IPs, CIDR notation, domain names -# that should be excluded from proxying. IP and domain names can -# contain port numbers. 
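+# For example, an illustrative sketch (not part of the reference above; the
+# endpoint URL is an assumption): a remote read entry that only serves
+# queries for data outside the local retention might look like:
+#
+#   remote_read:
+#     - url: http://long-term-storage.example.com/api/v1/read
+#       read_recent: false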
-[ no_proxy: ] -# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] -# Specifies headers to send to proxies during CONNECT requests. -[ proxy_connect_header: - [ : [, ...] ] ] - -# Configure whether HTTP requests follow HTTP 3xx redirects. -[ follow_redirects: | default = true ] - -# Whether to enable HTTP2. -[ enable_http2: | default: true ] - # Whether to use the external labels as selectors for the remote read endpoint. [ filter_external_labels: | default = true ] + +# HTTP client settings, including authentication methods (such as basic auth and +# authorization), proxy configurations, TLS options, custom HTTP headers, etc. +[ ] ``` There is a list of @@ -3829,8 +3128,6 @@ with this feature. `tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB. -NOTE: Out-of-order ingestion is an experimental feature, but you do not need any additional flag to enable it. Setting `out_of_order_time_window` to a positive duration enables it. - ```yaml # Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time. # An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp @@ -3842,8 +3139,8 @@ NOTE: Out-of-order ingestion is an experimental feature, but you do not need any # that is within the out-of-order window, or (b) too-old, i.e. not in-order # and before the out-of-order window. # -# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows -# the agent's WAL to accept out-of-order samples that fall within the specified time window relative +# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows +# the agent's WAL to accept out-of-order samples that fall within the specified time window relative # to the timestamp of the last appended sample for the same series. [ out_of_order_time_window: | default = 0s ] ``` diff --git a/docs/configuration/https.md b/docs/configuration/https.md index bc83e07a38..9a089ca922 100644 --- a/docs/configuration/https.md +++ b/docs/configuration/https.md @@ -3,8 +3,6 @@ title: HTTPS and authentication sort_rank: 7 --- -# HTTPS and authentication - Prometheus supports basic authentication and TLS. This is **experimental** and might change in the future. @@ -27,7 +25,7 @@ Generic placeholders are defined as follows: A valid example file can be found [here](/documentation/examples/web-config.yml). -``` +```yaml tls_server_config: # Certificate and key files for server to use to authenticate to client. cert_file: diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md index 9aa226bbc0..6d668a4da3 100644 --- a/docs/configuration/recording_rules.md +++ b/docs/configuration/recording_rules.md @@ -1,10 +1,9 @@ --- -title: Recording rules +title: Defining recording rules +nav_title: Recording rules sort_rank: 2 --- -# Defining recording rules - ## Configuring rules Prometheus supports two types of rules which may be configured and then @@ -17,10 +16,6 @@ Rule files use YAML. The rule files can be reloaded at runtime by sending `SIGHUP` to the Prometheus process. The changes are only applied if all rule files are well-formatted. -_Note about native histograms (experimental feature): Native histogram are always -recorded as gauge histograms (for now). Most cases will create gauge histograms -naturally, e.g. 
after `rate()`._ - ## Syntax-checking rules To quickly check whether a rule file is syntactically correct without starting @@ -38,7 +33,7 @@ When the file is syntactically valid, the checker prints a textual representation of the parsed rules to standard output and then exits with a `0` return status. -If there are any syntax errors or invalid input arguments, it prints an error +If there are any syntax errors or invalid input arguments, it prints an error message to standard error and exits with a `1` return status. ## Recording rules @@ -75,7 +70,8 @@ groups: ``` ### `` -``` + +```yaml # The name of the group. Must be unique within a file. name: @@ -89,6 +85,11 @@ name: # Offset the rule evaluation timestamp of this particular group by the specified duration into the past. [ query_offset: | default = global.rule_query_offset ] +# Labels to add or overwrite before storing the result for its rules. +# Labels defined in the rule will override the key if it has a collision. +labels: + [ : ] + rules: [ - ... ] ``` @@ -97,7 +98,7 @@ rules: The syntax for recording rules is: -``` +```yaml # The name of the time series to output to. Must be a valid metric name. record: @@ -113,7 +114,7 @@ labels: The syntax for alerting rules is: -``` +```yaml # The name of the alert. Must be a valid label value. alert: @@ -142,7 +143,7 @@ annotations: See also the [best practices for naming metrics created by recording rules](https://prometheus.io/docs/practices/rules/#recording-rules). -# Limiting alerts and series +## Limiting alerts and series A limit for alerts produced by alerting rules and series produced by recording rules can be configured per-group. When the limit is exceeded, _all_ series produced by the rule are discarded, and if it's an alerting rule, _all_ alerts, active, pending, or inactive, are cleared as well. The event will be recorded as an error in the evaluation, and as such no stale markers are written. -# Rule query offset +## Rule query offset This is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals. -# Failed rule evaluations due to slow evaluation +## Failed rule evaluations due to slow evaluation -If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group. +If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.
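+
+For example, a minimal alerting rule on missed iterations (an illustrative
+sketch; the threshold, duration, and labels are assumptions, and the metric is
+exposed by Prometheus about itself as `prometheus_rule_group_iterations_missed_total`):
+
+```yaml
+groups:
+  - name: rule-evaluation-health
+    rules:
+      - alert: RuleGroupIterationsMissed
+        expr: increase(prometheus_rule_group_iterations_missed_total[1h]) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          summary: "Rule group {{ $labels.rule_group }} is skipping evaluations."
+```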
diff --git a/docs/configuration/template_examples.md b/docs/configuration/template_examples.md index 672295343f..bd076b256e 100644 --- a/docs/configuration/template_examples.md +++ b/docs/configuration/template_examples.md @@ -3,8 +3,6 @@ title: Template examples sort_rank: 4 --- -# Template examples - Prometheus supports templating in the annotations and labels of alerts, as well as in served console pages. Templates have the ability to run queries against the local database, iterate over data, use conditionals, @@ -13,7 +11,7 @@ templating](https://golang.org/pkg/text/template/) system. ## Simple alert field templates -``` +```yaml alert: InstanceDown expr: up == 0 for: 5m @@ -33,7 +31,7 @@ console instead. This displays a list of instances, and whether they are up: -```go +``` {{ range query "up" }} {{ .Labels.instance }} {{ .Value }} {{ end }} @@ -43,7 +41,7 @@ The special `.` variable contains the value of the current sample for each loop ## Display one value -```go +``` {{ with query "some_metric{instance='someinstance'}" }} {{ . | first | value | humanize }} {{ end }} @@ -58,7 +56,7 @@ formatting of results, and linking to the [expression browser](https://prometheu ## Using console URL parameters -```go +``` {{ with printf "node_memory_MemTotal{job='node',instance='%s'}" .Params.instance | query }} {{ . | first | value | humanize1024 }}B {{ end }} @@ -95,7 +93,7 @@ powerful when combined with [console library](template_reference.md#console-templates) support, allowing sharing of templates across consoles. -```go +``` {{/* Define the template */}} {{define "myTemplate"}} do something @@ -107,7 +105,7 @@ sharing of templates across consoles. Templates are limited to one argument. The `args` function can be used to wrap multiple arguments. -```go +``` {{define "myMultiArgTemplate"}} First argument: {{.arg0}} Second argument: {{.arg1}} diff --git a/docs/configuration/template_reference.md b/docs/configuration/template_reference.md index 47df9d1e09..57f2606b13 100644 --- a/docs/configuration/template_reference.md +++ b/docs/configuration/template_reference.md @@ -3,8 +3,6 @@ title: Template reference sort_rank: 5 --- -# Template reference - Prometheus supports templating in the annotations and labels of alerts, as well as in served console pages. Templates have the ability to run queries against the local database, iterate over data, use conditionals, @@ -17,8 +15,8 @@ The primary data structure for dealing with time series data is the sample, defi ```go type sample struct { - Labels map[string]string - Value interface{} + Labels map[string]string + Value interface{} } ``` @@ -68,7 +66,7 @@ versions. 
| Name | Arguments | Returns | Notes | | ------------- | ------------- | ------- | ----------- | -| title | string | string | [strings.Title](https://golang.org/pkg/strings/#Title), capitalises first character of each word.| +| title | string | string | [cases.Title](https://pkg.go.dev/golang.org/x/text/cases#Title), capitalises first character of each word.| | toUpper | string | string | [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper), converts all characters to upper case.| | toLower | string | string | [strings.ToLower](https://golang.org/pkg/strings/#ToLower), converts all characters to lower case.| | stripPort | string | string | [net.SplitHostPort](https://pkg.go.dev/net#SplitHostPort), splits string into host and port, then returns only host.| diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md index 163fcb91f1..4e2c62e1cf 100644 --- a/docs/configuration/unit_testing_rules.md +++ b/docs/configuration/unit_testing_rules.md @@ -1,10 +1,8 @@ --- -title: Unit Testing for Rules +title: Unit testing for rules sort_rank: 6 --- -# Unit Testing for Rules - You can use `promtool` to test your rules. ```shell @@ -24,6 +22,10 @@ rule_files: [ evaluation_interval: | default = 1m ] +# Setting fuzzy_compare true will very slightly weaken floating point comparisons. +# This will (effectively) ignore differences in the last bit of the mantissa. +[ fuzzy_compare: | default = false ] + # The order in which group names are listed below will be the order of evaluation of # rule groups (at a given evaluation time). The order is guaranteed only for the groups mentioned below. # All the groups need not be mentioned below. @@ -92,23 +94,25 @@ series: # # Native histogram notation: # Native histograms can be used instead of floating point numbers using the following notation: -# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}} +# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}} # Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'. # All properties are optional and default to 0. The order is not important. The following properties are supported: -# - schema (int): -# Currently valid schema numbers are -4 <= n <= 8. They are all for -# base-2 bucket schemas, where 1 is a bucket boundary in each case, and +# - schema (int): +# Currently valid schema numbers are -53 and -4 <= n <= 8. +# Schema -53 is the custom buckets schema, upper bucket boundaries are defined in custom_values +# like for classic histograms, and you shouldn't use z_bucket, z_bucket_w, n_buckets, n_offset. +# The rest are base-2 standard schemas, where 1.0 is a bucket boundary in each case, and # then each power of two is divided into 2^n logarithmic buckets. Or # in other words, each bucket boundary is the previous boundary times # 2^(2^-n). -# - sum (float): +# - sum (float): # The sum of all observations, including the zero bucket. -# - count (non-negative float): +# - count (non-negative float): # The number of observations, including those that are NaN and including the zero bucket. -# - z_bucket (non-negative float): +# - z_bucket (non-negative float): # The sum of all observations in the zero bucket. -# - z_bucket_w (non-negative float): -# The width of the zero bucket. +# - z_bucket_w (non-negative float): +# The width of the zero bucket. 
# If z_bucket_w > 0, the zero bucket contains all observations -z_bucket_w <= x <= z_bucket_w. # Otherwise, the zero bucket only contains observations that are exactly 0. # - buckets (list of non-negative floats): @@ -119,6 +123,12 @@ series: # Observation counts in negative buckets. Each represents an absolute count. # - n_offset (int): # The starting index of the first entry in the negative buckets. +# - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge') +# The counter reset hint associated with this histogram. Defaults to 'unknown' if not set. +# - custom_values (list of floats in ascending order): +# The upper limits for custom buckets when schema is -53. +# These have the same role as the 'le' numbers in classic histograms. +# Do not append '+Inf' at the end, it is implicit. values: ``` diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 24d70647fd..4c390ab92e 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -3,36 +3,17 @@ title: Feature flags sort_rank: 12 --- -# Feature flags - Here is a list of features that are disabled by default since they are breaking changes or are considered experimental. Their behaviour can change in future releases which will be communicated via the [release changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md). You can enable them using the `--enable-feature` flag with a comma separated list of features. They may be enabled by default in future versions. -## Expand environment variables in external labels - -`--enable-feature=expand-external-labels` - -Replace `${var}` or `$var` in the [`external_labels`](configuration/configuration.md#configuration-file) -values according to the values of the current environment variables. References -to undefined variables are replaced by the empty string. -The `$` character can be escaped by using `$$`. - -## Remote Write Receiver - -`--enable-feature=remote-write-receiver` - -The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview). - -Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus. - ## Exemplars storage `--enable-feature=exemplar-storage` -[OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces. +[OpenMetrics](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces. Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The config file block [storage](configuration/configuration.md#configuration-file)/[exemplars](configuration/configuration.md#exemplars) can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `trace_id=` uses roughly 100 bytes of memory via the in-memory exemplar storage. 
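+For example, the exemplar buffer can be sized in the main configuration file
+(an illustrative sketch; `100000` is the default value):
+
+```yaml
+storage:
+  exemplars:
+    max_exemplars: 200000
+```
+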
If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration). @@ -40,9 +21,8 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem `--enable-feature=memory-snapshot-on-shutdown` -This takes the snapshot of the chunks that are in memory along with the series information when shutting down and stores -it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped -chunks without the need of WAL replay. +This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot +and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot. ## Extra scrape metrics @@ -55,30 +35,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`. - `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`. -## New service discovery manager - -`--enable-feature=new-service-discovery-manager` - -When enabled, Prometheus uses a new service discovery manager that does not -restart unchanged discoveries upon reloading. This makes reloads faster and reduces -pressure on service discoveries' sources. - -Users are encouraged to test the new service discovery manager and report any -issues upstream. - -In future releases, this new service discovery manager will become the default and -this feature flag will be ignored. - -## Prometheus agent - -`--enable-feature=agent` - -When enabled, Prometheus runs in agent mode. The agent mode is limited to -discovery, scrape and remote write. - -This is useful when you do not need to query the Prometheus data locally, but -only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). - ## Per-step stats `--enable-feature=promql-per-step-stats` @@ -89,29 +45,6 @@ statistics. Currently this is limited to totalQueryableSamples. When disabled in either the engine or the query, per-step statistics are not computed at all. -## Auto GOMAXPROCS - -`--enable-feature=auto-gomaxprocs` - -When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. - -## Auto GOMEMLIMIT - -`--enable-feature=auto-gomemlimit` - -When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used. - -There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. 
The default is `0.9`, which means 90% of the memory limit will be used for Prometheus. - -## No default scrape port - -`--enable-feature=no-default-scrape-port` - -When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to -the address used to scrape a target (the value of the `__address_` label), contrary to the default behavior. -In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or -by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed. - ## Native Histograms `--enable-feature=native-histograms` @@ -134,74 +67,15 @@ those classic histograms that do not come with a corresponding native histogram. However, if a native histogram is present, Prometheus will ignore the corresponding classic histogram, with the notable exception of exemplars, which are always ingested. To keep the classic histograms as well, enable -`scrape_classic_histograms` in the scrape job. - -_Note about the format of `le` and `quantile` label values:_ - -In certain situations, the protobuf parsing changes the number formatting of -the `le` labels of classic histograms and the `quantile` labels of -summaries. Typically, this happens if the scraped target is instrumented with -[client_golang](https://github.com/prometheus/client_golang) provided that -[promhttp.HandlerOpts.EnableOpenMetrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus/promhttp#HandlerOpts) -is set to `false`. In such a case, integer label values are represented in the -text format as such, e.g. `quantile="1"` or `le="2"`. However, the protobuf parsing -changes the representation to float-like (following the OpenMetrics -specification), so the examples above become `quantile="1.0"` and `le="2.0"` after -ingestion into Prometheus, which changes the identity of the metric compared to -what was ingested before via the text format. - -The effect of this change is that alerts, recording rules and dashboards that -directly reference label values as whole numbers such as `le="1"` will stop -working. - -Aggregation by the `le` and `quantile` labels for vectors that contain the old and -new formatting will lead to unexpected results, and range vectors that span the -transition between the different formatting will contain additional series. -The most common use case for both is the quantile calculation via -`histogram_quantile`, e.g. -`histogram_quantile(0.95, sum by (le) (rate(histogram_bucket[10m])))`. -The `histogram_quantile` function already tries to mitigate the effects to some -extent, but there will be inaccuracies, in particular for shorter ranges that -cover only a few samples. - -Ways to deal with this change either globally or on a per metric basis: - -- Fix references to integer `le`, `quantile` label values, but otherwise do -nothing and accept that some queries that span the transition time will produce -inaccurate or unexpected results. -_This is the recommended solution, to get consistently normalized label values._ -Also Prometheus 3.0 is expected to enforce normalization of these label values. -- Use `metric_relabel_config` to retain the old labels when scraping targets. -This should **only** be applied to metrics that currently produce such labels. 
- - -```yaml - metric_relabel_configs: - - source_labels: - - quantile - target_label: quantile - regex: (\d+)\.0+ - - source_labels: - - le - - __name__ - target_label: le - regex: (\d+)\.0+;.*_bucket -``` - -## OTLP Receiver - -`--enable-feature=otlp-write-receiver` - -The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes. -Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features -won't work when you push OTLP metrics. +`always_scrape_classic_histograms` in the scrape job. ## Experimental PromQL functions `--enable-feature=promql-experimental-functions` -Enables PromQL functions that are considered experimental and whose name or -semantics could change. +Enables PromQL functions that are considered experimental. These functions +might change their name, syntax, or semantics. They might also get removed +entirely. ## Created Timestamps Zero Injection @@ -225,6 +99,12 @@ This has the potential to improve rule group evaluation latency and resource uti The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default. +## Serve old Prometheus UI + +Fall back to serving the old (Prometheus 2.x) web UI instead of the new UI. The new UI that was released as part of Prometheus 3.0 is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. However, it is not fully feature complete and battle-tested yet, so some users may still prefer using the old UI. + +`--enable-feature=old-ui` + ## Metadata WAL Records `--enable-feature=metadata-wal-records` @@ -232,5 +112,193 @@ The number of concurrent rule evaluations can be configured with `--rules.max-co When enabled, Prometheus will store metadata in-memory and keep track of metadata changes as WAL records on a per-series basis. -This must be used if -you are also using remote write 2.0 as it will only gather metadata from the WAL. +This must be used if you would like to send metadata using the new remote write 2.0. + +## Delay compaction start time + +`--enable-feature=delayed-compaction` + +A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources. + +Only auto Head compactions and the operations directly resulting from them are subject to this delay. + +In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay. + +Note that during this delay, the Head continues its usual operations, which include serving and appending series. + +Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place. + +## Delay `__name__` label removal for PromQL engine + +`--enable-feature=promql-delayed-name-removal` + +When enabled, Prometheus will change the way in which the `__name__` label is removed from PromQL query results (for functions and expressions for which this is necessary). Specifically, it will delay the removal to the last step of the query evaluation, instead of every time an expression or function creating derived metrics is evaluated. 
+
+This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.
+
+Note that evaluating parts of the query separately will still trigger the
+labelset collision. This commonly happens when analyzing intermediate results
+of a query manually or with a tool like PromLens.
+
+If a query refers to the already removed `__name__` label, its behavior may
+change while this feature flag is set. (Example: `sum by (__name__)
+(rate({foo="bar"}[5m]))`, see [details on
+GitHub](https://github.com/prometheus/prometheus/issues/11397#issuecomment-1451998792).)
+Such queries are rare and easy to fix. (In the above example,
+removing `by (__name__)` doesn't change anything without the feature flag and
+fixes the possible problem with the feature flag.)
+
+## Auto Reload Config
+
+`--enable-feature=auto-reload-config`
+
+When enabled, Prometheus will automatically reload its configuration file at a
+specified interval. The interval is defined by the
+`--config.auto-reload-interval` flag, which defaults to `30s`.
+
+Configuration reloads are triggered by detecting changes in the checksum of the
+main configuration file or any referenced files, such as rule and scrape
+configurations. To ensure consistency and avoid issues during reloads, it's
+recommended to update these files atomically.
+
+## OTLP Delta Conversion
+
+`--enable-feature=otlp-deltatocumulative`
+
+When enabled, Prometheus will convert OTLP metrics from delta temporality to their
+cumulative equivalent, instead of dropping them. This cannot be enabled in conjunction with `otlp-native-delta-ingestion`.
+
+This uses
+[deltatocumulative][d2c]
+from the OTel collector, using its default settings.
+
+Delta conversion keeps in-memory state to aggregate delta changes per-series over time.
+When Prometheus restarts, this state is lost, starting the aggregation from zero
+again. This results in a counter reset in the cumulative series.
+
+This state is periodically ([`max_stale`][d2c]) cleared of inactive series.
+
+Enabling this _can_ have a negative impact on performance, because the in-memory
+state is mutex guarded. Cumulative-only OTLP requests are not affected.
+
+[d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor
+
+## PromQL arithmetic expressions in time durations
+
+`--enable-feature=promql-duration-expr`
+
+With this flag, arithmetic expressions can be used in time durations in range queries and offset durations.
+
+In range queries:
+```
+rate(http_requests_total[5m * 2])      # 10 minute range
+rate(http_requests_total[(5+2) * 1m])  # 7 minute range
+```
+
+In offset durations:
+```
+http_requests_total offset (1h / 2)        # 30 minute offset
+http_requests_total offset ((2 ^ 3) * 1m)  # 8 minute offset
+```
+
+When using offset with duration expressions, you must wrap the expression in
+parentheses. Without parentheses, only the first duration value will be used in
+the offset calculation.
+
+`step()` can be used in duration expressions.
+For a **range query**, it resolves to the step width of the range query.
+For an **instant query**, it resolves to `0s`.
+
+`min(<duration>, <duration>)` and `max(<duration>, <duration>)` can be used to find the minimum or maximum of two duration expressions.
+
+**Note**: Duration expressions are not supported in the @ timestamp operator.
+
+The following operators are supported:
+
+* `+` - addition
+* `-` - subtraction
+* `*` - multiplication
+* `/` - division
+* `%` - modulo
+* `^` - exponentiation
+
+Examples of equivalent durations:
+
+* `5m * 2` is equivalent to `10m` or `600s`
+* `10m - 1m` is equivalent to `9m` or `540s`
+* `(5+2) * 1m` is equivalent to `7m` or `420s`
+* `1h / 2` is equivalent to `30m` or `1800s`
+* `4h % 3h` is equivalent to `1h` or `3600s`
+* `(2 ^ 3) * 1m` is equivalent to `8m` or `480s`
+* `step() + 1` is equivalent to the query step width increased by 1s.
+* `max(step(), 5s)` is equivalent to the larger of the query step width and `5s`.
+* `min(2 * step() + 5s, 5m)` is equivalent to the smaller of twice the query step increased by `5s` and `5m`.
+
+
+## OTLP Native Delta Support
+
+`--enable-feature=otlp-native-delta-ingestion`
+
+When enabled, allows for the native ingestion of delta OTLP metrics, storing the raw sample values without conversion. This cannot be enabled in conjunction with `otlp-deltatocumulative`.
+
+Currently, the StartTimeUnixNano field is ignored, and deltas are given the unknown metric metadata type.
+
+Delta support is in a very early stage of development and the ingestion and querying process may change over time. For the open proposal see [prometheus/proposals#48](https://github.com/prometheus/proposals/pull/48).
+
+### Querying
+
+We encourage users to experiment with deltas and existing PromQL functions; we will collect feedback and likely build features to improve the experience around querying deltas.
+
+Note that standard PromQL counter functions like `rate()` and `increase()` are designed for cumulative metrics and will produce incorrect results when used with delta metrics. This may change in the future, but for now, to get similar results for delta metrics, you need `sum_over_time()`:
+
+* `sum_over_time(delta_metric[<range>])`: Calculates the sum of delta values over the specified time range.
+* `sum_over_time(delta_metric[<range>]) / <range in seconds>`: Calculates the per-second rate of the delta metric.
+
+These may not work well if the `<range>` is not a multiple of the collection interval of the metric. For example, if you run a `sum_over_time(delta_metric[1m]) / 1m` range query (with a 1m step), but the collection interval of a metric is 10m, the graph will show a single point every 10 minutes with a high rate value, rather than 10 points with a lower, constant value.
+
+### Current gotchas
+
+* If delta metrics are exposed via [federation](https://prometheus.io/docs/prometheus/latest/federation/), data can be incorrectly collected if the ingestion interval is not the same as the scrape interval for the federated endpoint.
+
+* It is difficult to figure out whether a metric has delta or cumulative temporality, since there's no indication of temporality in metric names or labels. For now, if you are ingesting a mix of delta and cumulative metrics we advise you to explicitly add your own labels to distinguish them. In the future, we plan to introduce type labels to consistently distinguish metric types and potentially make PromQL functions type-aware (e.g. providing warnings when cumulative-only functions are used with delta metrics).
+
+* If there are multiple samples being ingested at the same timestamp, only one of the points is kept - the samples are **not** summed together (this is how Prometheus works in general - duplicate timestamp samples are rejected). Any aggregation will have to be done before sending samples to Prometheus.
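+
+As a sketch of the `sum_over_time()` approach described under "Querying" above
+(`http_server_request_count` is a hypothetical delta-ingested metric):
+
+```
+# Total of the delta values over the last 5 minutes.
+sum_over_time(http_server_request_count[5m])
+
+# Approximate per-second rate over the same window (5m = 300 seconds).
+sum_over_time(http_server_request_count[5m]) / 300
+```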
+
+## Type and Unit Labels
+
+`--enable-feature=type-and-unit-labels`
+
+When enabled, Prometheus will start injecting additional, reserved `__type__`
+and `__unit__` labels as designed in the [PROM-39 proposal](https://github.com/prometheus/proposals/pull/39).
+
+Those labels are sourced from the metadata structures of the existing scrape and ingestion formats
+like OpenMetrics Text, Prometheus Text, Prometheus Proto, Remote Write 2 and OTLP. Any user-provided
+`__type__` and `__unit__` labels will be overridden.
+
+The PromQL layer will handle those labels the same way `__name__` is handled, e.g. they are dropped
+on certain operations like `-` or `+` and affected by the `promql-delayed-name-removal` feature.
+
+This feature makes important metadata information accessible directly alongside samples and in the PromQL layer.
+
+It's especially useful for users who:
+
+* Want to be able to select metrics based on type or unit.
+* Want to handle cases of series with the same metric name and different types and units,
+  e.g. native histogram migrations or OpenTelemetry metrics from an OTLP endpoint, without translation.
+
+In the future, more [work is planned](https://github.com/prometheus/prometheus/issues/16610) that will depend on this, e.g. a rich PromQL UX that helps
+when wrong types are used with the wrong functions, automatic renames, delta types and more.
+
+## Use Uncached IO
+
+`--enable-feature=use-uncached-io`
+
+Experimental and only available on Linux.
+
+When enabled, it makes chunk writing bypass the page cache. Its primary
+goal is to reduce confusion around page-cache behavior and to prevent over-allocation of
+memory in response to misleading cache growth.
+
+This is currently implemented using direct I/O.
+
+For more details, see the [proposal](https://github.com/prometheus/proposals/pull/45).
diff --git a/docs/federation.md b/docs/federation.md
index 3344a0ed05..580fa30874 100644
--- a/docs/federation.md
+++ b/docs/federation.md
@@ -3,8 +3,6 @@ title: Federation
 sort_rank: 6
 ---
 
-# Federation
-
 Federation allows a Prometheus server to scrape selected time series from
 another Prometheus server.
 
diff --git a/docs/getting_started.md b/docs/getting_started.md
index e89ac705ee..aeba295da8 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -3,8 +3,6 @@ title: Getting started
 sort_rank: 1
 ---
 
-# Getting started
-
 This guide is a "Hello World"-style tutorial which shows how to install,
 configure, and use a simple Prometheus instance. You will download and run
 Prometheus locally, configure it to scrape itself and an example application,
@@ -200,7 +198,7 @@ To record the time series resulting from this expression into a new metric
 called `job_instance_mode:node_cpu_seconds:avg_rate5m`, create a file
 with the following recording rule and save it as `prometheus.rules.yml`:
 
-```
+```yaml
 groups:
 - name: cpu-node
   rules:
@@ -262,6 +260,9 @@ process ID.
 
 ## Shutting down your instance gracefully.
 
 While Prometheus does have recovery mechanisms in the case that there is an
-abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly
-shutdown a Prometheus instance. If you're running on Linux this can be performed
-by using `kill -s SIGTERM <PID>`, replacing `<PID>` with your Prometheus process ID.
+abrupt process failure, it is recommended to use signals or interrupts for a
+clean shutdown of a Prometheus instance. On Linux, this can be done by sending
+the `SIGTERM` or `SIGINT` signals to the Prometheus process. For example, you
+can use `kill -s <SIGNAL> <PID>`, replacing `<SIGNAL>` with the signal name
+and `<PID>` with the Prometheus process ID. Alternatively, you can press the
+interrupt character at the controlling terminal, which by default is `^C` (Control-C).
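+
+As a minimal sketch (assuming exactly one Prometheus process is running and
+`pgrep` is available):
+
+```bash
+# Look up the Prometheus PID and send SIGTERM for a clean shutdown.
+kill -s SIGTERM "$(pgrep prometheus)"
+```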
diff --git a/docs/http_sd.md b/docs/http_sd.md
index 884deb9f3c..3bd6bada39 100644
--- a/docs/http_sd.md
+++ b/docs/http_sd.md
@@ -1,14 +1,13 @@
 ---
-title: HTTP SD
+title: Writing HTTP service discovery
+nav_title: HTTP SD
 sort_rank: 7
 ---
 
-# Writing HTTP Service Discovery
-
 Prometheus provides a generic [HTTP Service Discovery](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config),
 that enables it to discover targets over an HTTP endpoint.
 
-The HTTP Service Discovery is complimentary to the supported service
+The HTTP Service Discovery is complementary to the supported service
 discovery mechanisms, and is an alternative to [File-based Service Discovery](https://prometheus.io/docs/guides/file-sd/#use-file-based-service-discovery-to-discover-scrape-targets).
 
 ## Comparison between File-Based SD and HTTP SD
@@ -40,7 +39,7 @@ an empty list `[]`. Target lists are unordered.
 
 Prometheus caches target lists. If an error occurs while fetching an updated
 targets list, Prometheus keeps using the current targets list. The targets list
-is not saved across restart. The `prometheus_sd_http_failures_total` counter
+is not saved across restart. The `prometheus_sd_http_failures_total` counter metric
 tracks the number of refresh failures.
 
 The whole list of targets must be returned on every scrape. There is no support
diff --git a/docs/installation.md b/docs/installation.md
index c8e359e780..fbab67a59d 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -3,8 +3,6 @@ title: Installation
 sort_rank: 2
 ---
 
-# Installation
-
 ## Using pre-compiled binaries
 
 We provide precompiled binaries for most official Prometheus components. Check
diff --git a/docs/management_api.md b/docs/management_api.md
index 0b496cbaa7..ecc46fad88 100644
--- a/docs/management_api.md
+++ b/docs/management_api.md
@@ -3,12 +3,10 @@ title: Management API
 sort_rank: 8
 ---
 
-# Management API
-
 Prometheus provides a set of management APIs to facilitate automation and integration.
 
-### Health check
+## Health check
 
 ```
 GET /-/healthy
@@ -18,7 +16,7 @@ HEAD /-/healthy
 ```
 
 This endpoint always returns 200 and should be used to check Prometheus health.
 
-### Readiness check
+## Readiness check
 
 ```
 GET /-/ready
@@ -28,7 +26,7 @@ HEAD /-/ready
 ```
 
 This endpoint returns 200 when Prometheus is ready to serve traffic (i.e.
 respond to queries).
 
-### Reload
+## Reload
 
 ```
 PUT /-/reload
@@ -40,7 +38,7 @@ This endpoint triggers a reload of the Prometheus configuration and rule files.
 
 Alternatively, a configuration reload can be triggered by sending a `SIGHUP` to the Prometheus process.
 
-### Quit
+## Quit
 
 ```
 PUT /-/quit
diff --git a/docs/migration.md b/docs/migration.md
index cb88bbfd6f..55e366456d 100644
--- a/docs/migration.md
+++ b/docs/migration.md
@@ -1,200 +1,245 @@
 ---
-title: Migration
+title: Prometheus 3.0 migration guide
+nav_title: Migration
 sort_rank: 10
 ---
 
-# Prometheus 2.0 migration guide
-
-In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print),
-the Prometheus 2.0 release contains a number of backwards incompatible changes.
-This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0 and newer versions.
+In line with our [stability promise](https://prometheus.io/docs/prometheus/latest/stability/),
+the Prometheus 3.0 release contains a number of backwards incompatible changes.
+This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 and newer versions.
 
 ## Flags
 
-The format of Prometheus command line flags has changed. Instead of a
-single dash, all flags now use a double dash. Common flags (`--config.file`,
-`--web.listen-address` and `--web.external-url`) remain but
-almost all storage-related flags have been removed.
+- The following feature flags have been removed; their functionality is now part of the
+  default behavior of Prometheus v3:
+  - `promql-at-modifier`
+  - `promql-negative-offset`
+  - `new-service-discovery-manager`
+  - `expand-external-labels`
+    - Environment variable references `${var}` or `$var` in external label values
+      are replaced according to the values of the current environment variables.
+    - References to undefined variables are replaced by the empty string.
+      The `$` character can be escaped by using `$$`.
+  - `no-default-scrape-port`
+    - Prometheus v3 will no longer add ports to scrape targets according to the
+      specified scheme. Targets will now appear in labels as configured.
+    - If you rely on scrape targets like
+      `https://example.com/metrics` or `http://example.com/metrics` to be
+      represented as `https://example.com/metrics:443` and
+      `http://example.com/metrics:80` respectively, add the ports to your target URLs.
+  - `agent`
+    - Instead use the dedicated `--agent` CLI flag.
+  - `remote-write-receiver`
+    - Instead use the dedicated `--web.enable-remote-write-receiver` CLI flag to enable the remote write receiver.
+  - `auto-gomemlimit`
+    - Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux
+      container memory limit. If there is no container limit, or the process is
+      running outside of containers, the system memory total is used. To disable
+      this, `--no-auto-gomemlimit` is available.
+  - `auto-gomaxprocs`
+    - Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux
+      container CPU quota. To disable this, `--no-auto-gomaxprocs` is available.
 
-Some notable flags which have been removed:
+  Prometheus v3 will log a warning if you continue to pass these to
+  `--enable-feature`.
 
-- `-alertmanager.url` In Prometheus 2.0, the command line flags for configuring
-  a static Alertmanager URL have been removed. Alertmanager must now be
-  discovered via service discovery, see [Alertmanager service discovery](#alertmanager-service-discovery).
+## Configuration
 
-- `-log.format` In Prometheus 2.0 logs can only be streamed to standard error.
-
-- `-query.staleness-delta` has been renamed to `--query.lookback-delta`; Prometheus
-  2.0 introduces a new mechanism for handling staleness, see [staleness](querying/basics.md#staleness).
-
-- `-storage.local.*` Prometheus 2.0 introduces a new storage engine; as such all
-  flags relating to the old engine have been removed. For information on the
-  new engine, see [Storage](#storage).
-
-- `-storage.remote.*` Prometheus 2.0 has removed the deprecated remote
-  storage flags, and will fail to start if they are supplied. To write to
-  InfluxDB, Graphite, or OpenTSDB use the relevant storage adapter.
-
-## Alertmanager service discovery
-
-Alertmanager service discovery was introduced in Prometheus 1.4, allowing Prometheus
-to dynamically discover Alertmanager replicas using the same mechanism as scrape
-targets.
In Prometheus 2.0, the command line flags for static Alertmanager config -have been removed, so the following command line flag: - -``` -./prometheus -alertmanager.url=http://alertmanager:9093/ -``` - -Would be replaced with the following in the `prometheus.yml` config file: - -```yaml -alerting: - alertmanagers: - - static_configs: - - targets: - - alertmanager:9093 -``` - -You can also use all the usual Prometheus service discovery integrations and -relabeling in your Alertmanager configuration. This snippet instructs -Prometheus to search for Kubernetes pods, in the `default` namespace, with the -label `name: alertmanager` and with a non-empty port. - -```yaml -alerting: - alertmanagers: - - kubernetes_sd_configs: - - role: pod - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - source_labels: [__meta_kubernetes_pod_label_name] - regex: alertmanager - action: keep - - source_labels: [__meta_kubernetes_namespace] - regex: default - action: keep - - source_labels: [__meta_kubernetes_pod_container_port_number] - regex: - action: drop -``` - -## Recording rules and alerts - -The format for configuring alerting and recording rules has been changed to YAML. -An example of a recording rule and alert in the old format: - -``` -job:request_duration_seconds:histogram_quantile99 = - histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m]))) - -ALERT FrontendRequestLatency - IF job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1 - FOR 5m - ANNOTATIONS { - summary = "High frontend request latency", - } -``` - -Would look like this: - -```yaml -groups: -- name: example.rules - rules: - - record: job:request_duration_seconds:histogram_quantile99 - expr: histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m]))) - - alert: FrontendRequestLatency - expr: job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1 - for: 5m - annotations: - summary: High frontend request latency -``` - -To help with the change, the `promtool` tool has a mode to automate the rules conversion. Given a `.rules` file, it will output a `.rules.yml` file in the -new format. For example: - -``` -$ promtool update rules example.rules -``` - -You will need to use `promtool` from [Prometheus 2.5](https://github.com/prometheus/prometheus/releases/tag/v2.5.0) as later versions no longer contain the above subcommand. - -## Storage - -The data format in Prometheus 2.0 has completely changed and is not backwards -compatible with 1.8 and older versions. To retain access to your historic monitoring data we -recommend you run a non-scraping Prometheus instance running at least version -1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server -read existing data from the old one via the remote read protocol. 
-
-Your Prometheus 1.8 instance should be started with the following flags and an
-config file containing only the `external_labels` setting (if any):
-
-```
-$ ./prometheus-1.8.1.linux-amd64/prometheus -web.listen-address ":9094" -config.file old.yml
-```
-
-Prometheus 2.0 can then be started (on the same machine) with the following flags:
-
-```
-$ ./prometheus-2.0.0.linux-amd64/prometheus --config.file prometheus.yml
-```
-
-Where `prometheus.yml` contains in addition to your full existing configuration, the stanza:
-
-```yaml
-remote_read:
-  - url: "http://localhost:9094/api/v1/read"
-```
+- The scrape job level configuration option `scrape_classic_histograms` has been
+  renamed to `always_scrape_classic_histograms`. If you use the
+  `--enable-feature=native-histograms` feature flag to ingest native histograms
+  and you also want to ingest classic histograms that an endpoint might expose
+  along with native histograms, be sure to add this configuration or change your
+  configuration from the old name.
+- The default for `http_config.enable_http2` in `remote_write` items has been
+  changed to `false`. In Prometheus v2 the remote write http client would
+  default to use http2. In order to parallelize multiple remote write queues
+  across multiple sockets it is preferable to not default to http2.
+  If you prefer to use http2 for remote write, you must now set
+  `http_config.enable_http2: true` in your `remote_write` configuration section.
 
 ## PromQL
 
-The following features have been removed from PromQL:
+### Regular expressions match newlines
 
-- `drop_common_labels` function - the `without` aggregation modifier should be used
-  instead.
-- `keep_common` aggregation modifier - the `by` modifier should be used instead.
-- `count_scalar` function - use cases are better handled by `absent()` or correct
-  propagation of labels in operations.
+The `.` pattern in regular expressions in PromQL matches newline characters.
+With this change a regular expression like `.*` matches strings that include
+`\n`. This applies to matchers in queries and relabel configs.
 
-See [issue #3060](https://github.com/prometheus/prometheus/issues/3060) for more
-details.
+For example, the following regular expressions now match the accompanying
+strings, whereas in Prometheus v2 these combinations didn't match.
+
+- `.*` additionally matches `foo\n` and `Foo\nBar`
+- `foo.?bar` additionally matches `foo\nbar`
+- `foo.+bar` additionally matches `foo\nbar`
+
+If you want Prometheus v3 to behave like v2, you will have to change your
+regular expressions by replacing all `.` patterns with `[^\n]`, e.g.
+`foo[^\n]*`.
+
+### Range selectors and lookback exclude samples coinciding with the left boundary
+
+Lookback and range selectors are now left-open and right-closed (previously
+left-closed and right-closed), which makes their behavior more consistent. This
+change affects queries where the left boundary of a range or the lookback delta
+coincides with the timestamp of one or more samples.
+
+For example, assume we are querying a timeseries with evenly spaced samples
+exactly 1 minute apart. Before Prometheus v3, a range query with `5m` would
+usually return 5 samples. But if the query evaluation aligns perfectly with a
+scrape, it would return 6 samples. In Prometheus v3 queries like this will
+always return 5 samples given even spacing.
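+
+As a worked example (`my_metric` is a hypothetical series), assume samples
+exactly 60 seconds apart at t=0s, 60s, ..., 300s, and a query evaluated at
+t=300s:
+
+```
+# v2: my_metric[5m] covered the closed interval [0s, 300s]   -> 6 samples.
+# v3: my_metric[5m] covers the left-open interval (0s, 300s] -> 5 samples.
+count_over_time(my_metric[5m])
+```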
+
+This change will typically affect subqueries because their evaluation timing is
+naturally perfectly evenly spaced and aligned with timestamps that are multiples
+of the subquery resolution. Furthermore, query frontends often align subqueries
+to multiples of the step size. In combination, this easily creates a situation
+of perfect mutual alignment, often unintended and unknown by the user, so that
+the new behavior might come as a surprise. Before Prometheus v3, a subquery of
+`foo[1m:1m]` on such a system might have always returned two points, allowing
+for rate calculations. In Prometheus v3, however, such a subquery will only
+return one point, which is insufficient for a rate or increase calculation,
+resulting in no data being returned.
+
+Such queries will need to be rewritten to extend the window to properly cover
+more than one point. In this example, `foo[2m:1m]` would always return two
+points no matter the query alignment. The exact form of the rewritten query may
+depend on the intended results and there is no universal drop-in replacement for
+queries whose behavior has changed.
+
+Tests are similarly more likely to be affected. To fix those, either adjust the
+expected number of samples or extend the range.
+
+### holt_winters function renamed
+
+The `holt_winters` function has been renamed to `double_exponential_smoothing`
+and is now guarded by the `promql-experimental-functions` feature flag.
+If you want to keep using `holt_winters`, you have to do both of these things:
+
+- Rename `holt_winters` to `double_exponential_smoothing` in your queries.
+- Pass `--enable-feature=promql-experimental-functions` in your Prometheus
+  CLI invocation.
+
+## Scrape protocols
+
+Prometheus v3 is more strict concerning the Content-Type header received when
+scraping. Prometheus v2 would default to the standard Prometheus text protocol
+if the target being scraped did not specify a Content-Type header or if the
+header was unparsable or unrecognised. This could lead to incorrect data being
+parsed in the scrape. Prometheus v3 will now fail the scrape in such cases.
+
+If a scrape target is not providing the correct Content-Type header, the
+fallback protocol can be specified using the `fallback_scrape_protocol`
+parameter. See the [Prometheus scrape_config documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
+
+This is a breaking change as scrapes that may have succeeded with Prometheus v2
+may now fail if this fallback protocol is not specified.
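+
+A minimal sketch of such a configuration (the job name and target are
+hypothetical; the target is assumed to expose the Prometheus text format
+without a valid Content-Type header):
+
+```yaml
+scrape_configs:
+  - job_name: legacy-app
+    fallback_scrape_protocol: PrometheusText0.0.4
+    static_configs:
+      - targets: ["legacy-app.example.com:8080"]
+```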
 
 ## Miscellaneous
 
-### Prometheus non-root user
+### TSDB format and downgrade
 
-The Prometheus Docker image is now built to [run Prometheus
-as a non-root user](https://github.com/prometheus/prometheus/pull/2859). If you
-want the Prometheus UI/API to listen on a low port number (say, port 80), you'll
-need to override it. For Kubernetes, you would use the following YAML:
+The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes
+to the index format. Consequently, a Prometheus v3 TSDB can only be read by
+Prometheus v2.55 or newer. Keep that in mind when upgrading to v3 -- you will only
+be able to downgrade to v2.55, not lower, without losing your TSDB persistent data.
+
+As an extra safety measure, you could optionally consider upgrading to v2.55 first and
+confirming Prometheus works as expected, before upgrading to v3.
+
+### TSDB storage contract
+
+TSDB compatible storage is now expected to return results matching the specified
+selectors. This might impact some third party implementations, most likely
+implementing `remote_read`.
+
+This contract is not explicitly enforced, but can cause undefined behavior.
+
+### UTF-8 names
+
+Prometheus v3 supports UTF-8 in metric and label names. This means metric and
+label names can change after upgrading according to what is exposed by
+endpoints. Furthermore, metric and label names that would have previously been
+flagged as invalid no longer will be.
+
+Users wishing to preserve the original validation behavior can update their
+Prometheus YAML configuration to specify the legacy validation scheme:
+
+```yaml
+global:
+  metric_name_validation_scheme: legacy
+```
+
+Or on a per-scrape basis:
+
+```yaml
+scrape_configs:
+  - job_name: job1
+    metric_name_validation_scheme: utf8
+  - job_name: job2
+    metric_name_validation_scheme: legacy
+```
+
+### Log message format
+
+Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This
+results in a change of log message format. An example of the old log format is:
+
+```
+ts=2024-10-23T22:01:06.074Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d
+ts=2024-10-23T22:01:06.074Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=91d80252c3e528728b0f88d254dd720f6be07cb8-modified)"
+ts=2024-10-23T22:01:06.074Z caller=main.go:676 level=info build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)"
+ts=2024-10-23T22:01:06.074Z caller=main.go:677 level=info host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))"
+```
+
+A similar sequence in the new log format looks like this:
+
+```
+time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:640 msg="No time or size retention was set so using the default time retention" duration=15d
+time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:681 msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=7c7116fea8343795cae6da42960cacd0207a2af8)"
+time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:686 msg="operational information" build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" fd_limits="(soft=1048576, hard=1048576)" vm_limits="(soft=unlimited, hard=unlimited)"
+```
+
+### `le` and `quantile` label values
+
+In Prometheus v3, the values of the `le` label of classic histograms and the
+`quantile` label of summaries are normalized upon ingestion. In Prometheus v2
+the value of these labels depended on the scrape protocol (protobuf vs text
+format) in some situations. This led to label values changing based on the
+scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be
+ingested as `my_classic_hist{le="1"}` via the text format, but as
+`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the
+metric and caused problems when querying the metric.
+
+In Prometheus v3 these label values will always be normalized to a float-like
+representation. I.e. the above example will always result in
+`my_classic_hist{le="1.0"}` being ingested into Prometheus, no matter via which
+protocol. The effect of this change is that alerts, recording rules and
+dashboards that directly reference label values as whole numbers such as
+`le="1"` will stop working.
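+
+For instance, a selector that relied on the v2 text-format representation
+(`my_classic_hist` is a hypothetical classic histogram) may now return no
+data, because the stored value is the normalized `le="1.0"`:
+
+```
+my_classic_hist_bucket{le="1"}
+```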
+
+Ways to deal with this change either globally or on a per metric basis:
+
+- Fix references to integer `le`, `quantile` label values, but otherwise do
+nothing and accept that some queries that span the transition time will produce
+inaccurate or unexpected results.
+_This is the recommended solution._
+- Use `metric_relabel_config` to retain the old labels when scraping targets.
+This should **only** be applied to metrics that currently produce such labels.
 
 ```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: security-context-demo-2
-spec:
-  securityContext:
-    runAsUser: 0
-...
+  metric_relabel_configs:
+  - source_labels:
+    - quantile
+    target_label: quantile
+    regex: (\d+)\.0+
+  - source_labels:
+    - le
+    - __name__
+    target_label: le
+    regex: (\d+)\.0+;.*_bucket
 ```
 
-See [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
-for more details.
+### Disallow configuring Alertmanager with the v1 API
 
-If you're using Docker, then the following snippet would be used:
+Prometheus 3 no longer supports Alertmanager's v1 API. Effectively Prometheus 3
+requires [Alertmanager 0.16.0](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0) or later. Users with older Alertmanager
+versions or configurations that use `alerting: alertmanagers: [api_version: v1]`
+need to upgrade Alertmanager and change their configuration to use `api_version: v2`.
 
-```
-docker run -p 9090:9090 prom/prometheus:latest
-```
-
-### Prometheus lifecycle
-
-If you use the Prometheus `/-/reload` HTTP endpoint to [automatically reload your
-Prometheus config when it changes](configuration/configuration.md),
-these endpoints are disabled by default for security reasons in Prometheus 2.0.
-To enable them, set the `--web.enable-lifecycle` flag.
+## Prometheus 2.0 migration guide
+
+For the migration guide from Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/).
diff --git a/docs/querying/api.md b/docs/querying/api.md
index efa244fbc8..a17bd1e3a2 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -3,8 +3,6 @@ title: HTTP API
 sort_rank: 7
 ---
 
-# HTTP API
-
 The current stable HTTP API is reachable under `/api/v1` on a Prometheus
 server. Any non-breaking additions will be added under that endpoint.
 
@@ -32,7 +30,7 @@ will be returned in the data field.
 
 The JSON response envelope format is as follows:
 
-```
+```json
 {
   "status": "success" | "error",
   "data": <data>,
@@ -45,7 +43,7 @@ The JSON response envelope format is as follows:
   // Only set if there were warnings while executing the request.
   // There will still be data in the data field.
   "warnings": ["<string>"],
-  // Only set if there were info-level annnotations while executing the request.
+  // Only set if there were info-level annotations while executing the request.
   "infos": ["<string>"]
 }
 ```
@@ -59,7 +57,7 @@ timestamps are always represented as Unix timestamps in seconds.
 * `<series_selector>`: Prometheus [time series
   selectors](basics.md#time-series-selectors) like `http_requests_total` or
   `http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded.
-* `<duration>`: [Prometheus duration strings](basics.md#time-durations).
+* `<duration>`: [the subset of Prometheus float literals using time units](basics.md#float-literals-and-time-durations).
+  For example, `5m` refers to a duration of 5 minutes.
 * `<bool>`: boolean values (strings `true` and `false`).
 
@@ -86,6 +84,7 @@ URL query parameters:
 
 - `time=<rfc3339 | unix_timestamp>`: Evaluation timestamp. Optional.
 - `timeout=<duration>`: Evaluation timeout. Optional. Defaults to and is capped by the value of the `--query.timeout` flag.
+- `limit=<number>`: Maximum number of returned series. Doesn't affect scalars or strings but truncates the number of series for matrices and vectors. Optional. 0 means disabled.
 
 The current server time is used if the `time` parameter is omitted.
 
@@ -95,7 +94,7 @@ query that may breach server-side URL character limits.
 
 The `data` section of the query result has the following format:
 
-```
+```json
 {
   "resultType": "matrix" | "vector" | "scalar" | "string",
   "result": <value>
@@ -109,8 +108,11 @@ formats](#expression-query-result-formats).
 The following example evaluates the expression `up` at the time
 `2015-07-01T20:10:51.781Z`:
 
+```bash
+curl 'http://localhost:9090/api/v1/query?query=up&time=2015-07-01T20:10:51.781Z'
+```
+
 ```json
-$ curl 'http://localhost:9090/api/v1/query?query=up&time=2015-07-01T20:10:51.781Z'
 {
    "status" : "success",
    "data" : {
@@ -154,6 +156,7 @@ URL query parameters:
 
 - `step=<duration | float>`: Query resolution step width in `duration` format or float number of seconds.
 - `timeout=<duration>`: Evaluation timeout. Optional. Defaults to and is capped by the value of the `--query.timeout` flag.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
 
 You can URL-encode these parameters directly in the request body by using the `POST` method and
 `Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
 query that may breach server-side URL character limits.
 
 The `data` section of the query result has the following format:
 
-```
+```json
 {
   "resultType": "matrix",
   "result": <value>
@@ -174,8 +177,11 @@ format](#range-vectors).
 
 The following example evaluates the expression `up` over a 30-second range with
 a query resolution of 15 seconds.
 
+```bash
+curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15s'
+```
+
 ```json
-$ curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15s'
 {
    "status" : "success",
    "data" : {
@@ -231,14 +237,89 @@ The `data` section of the query result is a string containing the formatted quer
 
 The following example formats the expression `foo/bar`:
 
+```bash
+curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
+```
+
 ```json
-$ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
 {
    "status" : "success",
    "data" : "foo / bar"
 }
 ```
 
+## Parsing a PromQL expression into an abstract syntax tree (AST)
+
+This endpoint is **experimental** and might change in the future. It is currently only meant to be used by Prometheus' own web UI, and the endpoint name and exact format returned may change from one Prometheus version to another. It may also be removed again in case it is no longer needed by the UI.
+
+The following endpoint parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation:
+
+```
+GET /api/v1/parse_query
+POST /api/v1/parse_query
+```
+
+URL query parameters:
+
+- `query=<string>`: Prometheus expression query string.
+
+You can URL-encode these parameters directly in the request body by using the `POST` method and
+`Content-Type: application/x-www-form-urlencoded` header.
+This is useful when specifying a large
+query that may breach server-side URL character limits.
+
+The `data` section of the query result is a string containing the AST of the parsed query expression.
+
+The following example parses the expression `foo/bar`:
+
+```bash
+curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar'
+```
+
+```json
+{
+   "data" : {
+      "bool" : false,
+      "lhs" : {
+         "matchers" : [
+            {
+               "name" : "__name__",
+               "type" : "=",
+               "value" : "foo"
+            }
+         ],
+         "name" : "foo",
+         "offset" : 0,
+         "startOrEnd" : null,
+         "timestamp" : null,
+         "type" : "vectorSelector"
+      },
+      "matching" : {
+         "card" : "one-to-one",
+         "include" : [],
+         "labels" : [],
+         "on" : false
+      },
+      "op" : "/",
+      "rhs" : {
+         "matchers" : [
+            {
+               "name" : "__name__",
+               "type" : "=",
+               "value" : "bar"
+            }
+         ],
+         "name" : "bar",
+         "offset" : 0,
+         "startOrEnd" : null,
+         "timestamp" : null,
+         "type" : "vectorSelector"
+      },
+      "type" : "binaryExpr"
+   },
+   "status" : "success"
+}
+```
+
 ## Querying metadata
 
 Prometheus offers a set of API endpoints to query metadata about series and their labels.
 
@@ -272,8 +353,11 @@ contain the label name/value pairs which identify each series.
 The following example returns all series that match either of the selectors
 `up` or `process_start_time_seconds{job="prometheus"}`:
 
+```bash
+curl -g 'http://localhost:9090/api/v1/series?' --data-urlencode 'match[]=up' --data-urlencode 'match[]=process_start_time_seconds{job="prometheus"}'
+```
+
 ```json
-$ curl -g 'http://localhost:9090/api/v1/series?' --data-urlencode 'match[]=up' --data-urlencode 'match[]=process_start_time_seconds{job="prometheus"}'
 {
    "status" : "success",
    "data" : [
@@ -318,8 +402,11 @@ The `data` section of the JSON response is a list of string label names.
 
 Here is an example.
 
+```bash
+curl 'localhost:9090/api/v1/labels'
+```
+
 ```json
-$ curl 'localhost:9090/api/v1/labels'
 {
     "status": "success",
     "data": [
@@ -364,18 +451,46 @@ URL query parameters:
   series from which to read the label values. Optional.
 - `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
-
 The `data` section of the JSON response is a list of string label values.
 
-This example queries for all label values for the `job` label:
+This example queries for all label values for the `http_status_code` label:
+
+```bash
+curl http://localhost:9090/api/v1/label/http_status_code/values
+```
 
 ```json
-$ curl http://localhost:9090/api/v1/label/job/values
 {
    "status" : "success",
    "data" : [
-      "node",
-      "prometheus"
+      "200",
+      "504"
   ]
 }
 ```
+
+Label names can optionally be encoded using the Values Escaping method, which is necessary if a name includes the `/` character. To encode a name in this way:
+
+* Prepend the label with `U__`.
+* Letters, numbers, and colons appear as-is.
+* Convert single underscores to double underscores.
+* For all other characters, use the UTF-8 codepoint as a hex integer, surrounded
+  by underscores. So ` ` becomes `_20_` and a `.` becomes `_2e_`.
+
+  More information about text escaping can be found in the original UTF-8 [Proposal document](https://github.com/prometheus/proposals/blob/main/proposals/2023-08-21-utf8.md#text-escaping).
+
+This example queries for all label values for the `http.status_code` label:
+
+```bash
+curl http://localhost:9090/api/v1/label/U__http_2e_status_code/values
+```
+
+```json
+{
+   "status" : "success",
+   "data" : [
+      "200",
+      "404"
+  ]
+}
+```
@@ -396,8 +511,11 @@ URL query parameters:
 
 - `start=<rfc3339 | unix_timestamp>`: Start timestamp.
 - `end=<rfc3339 | unix_timestamp>`: End timestamp.
 
+```bash
+curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'
+```
+
 ```json
-$ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'
 {
     "status": "success",
     "data": [
@@ -463,7 +581,7 @@ is explained in detail in its own section below.
 Range vectors are returned as result type `matrix`. The corresponding
 `result` property has the following format:
 
-```
+```json
 [
   {
     "metric": { "<label_name>": "<label_value>", ... },
@@ -485,7 +603,7 @@ and [`sort_by_label`](functions.md#sort_by_label) have no effect for range vecto
 Instant vectors are returned as result type `vector`. The corresponding
 `result` property has the following format:
 
-```
+```json
 [
   {
    "metric": { "<label_name>": "<label_value>", ... },
@@ -499,7 +617,7 @@ Each series could have the `"value"` key, or the `"histogram"` key, but not both.
 
 Series are not guaranteed to be returned in any particular order unless a function
-such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)`
+such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)
 is used.
 
 ### Scalars
 
 Scalar results are returned as result type `scalar`. The corresponding
 `result` property has the following format:
 
-```
+```json
 [ <unix_time>, "<scalar_value>" ]
 ```
 
 ### Strings
 
 String results are returned as result type `string`. The corresponding
 `result` property has the following format:
 
-```
+```json
 [ <unix_time>, "<string_value>" ]
 ```
 
 ### Native histograms
 
 The `<histogram>` placeholder used above is formatted as follows.
 
 _Note that native histograms are an experimental feature, and the format below
 might still change._
 
-```
+```json
 {
   "count": "<count>",
   "sum": "<sum>",
@@ -561,8 +679,11 @@ Dropped targets are subject to `keep_dropped_targets` limit, if set.
 `labels` represents the label set after relabeling has occurred.
 `discoveredLabels` represent the unmodified labels retrieved during service discovery before relabeling has occurred.
 
+```bash
+curl http://localhost:9090/api/v1/targets
+```
+
 ```json
-$ curl http://localhost:9090/api/v1/targets
 {
   "status": "success",
   "data": {
@@ -599,6 +720,7 @@ $ curl http://localhost:9090/api/v1/targets
           "__scrape_timeout__": "10s",
           "job": "node"
         },
+        "scrapePool": "node"
       }
     ]
   }
@@ -610,9 +732,12 @@ The `state` query parameter allows the caller to filter by active or dropped tar
 Note that an empty array is still returned for targets that are filtered out.
 Other values are ignored.
 
+```bash
+curl 'http://localhost:9090/api/v1/targets?state=active'
+```
+
 ```json
-$ curl 'http://localhost:9090/api/v1/targets?state=active'
-{
+{
   "status": "success",
   "data": {
     "activeTargets": [
@@ -643,9 +768,12 @@ $ curl 'http://localhost:9090/api/v1/targets?state=active'
 
 The `scrapePool` query parameter allows the caller to filter by scrape pool name.
 
+```bash
+curl 'http://localhost:9090/api/v1/targets?scrapePool=node_exporter'
+```
+
 ```json
-$ curl 'http://localhost:9090/api/v1/targets?scrapePool=node_exporter'
-{
+{
   "status": "success",
   "data": {
     "activeTargets": [
@@ -693,12 +821,16 @@ URL query parameters:
 
 - `rule_name[]=<string>`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
 - `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
 - `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
-- `exclude_alerts=`: only return rules, do not return active alerts.
+- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
 - `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
+- `group_limit=<number>`: The `group_limit` parameter allows you to specify a limit for the number of rule groups that is returned in a single response. If the total number of rule groups exceeds the specified `group_limit` value, the response will include a `groupNextToken` property. You can use the value of this `groupNextToken` property in subsequent requests in the `group_next_token` parameter to paginate over the remaining rule groups. The `groupNextToken` property will not be present in the final response, indicating that you have retrieved all the available rule groups. Please note that there are no guarantees regarding the consistency of the response if the rule groups are being modified during the pagination process.
+- `group_next_token`: the pagination token that was returned in the previous request when the `group_limit` property is set. The pagination token is used to iteratively paginate over a large number of rule groups. To use the `group_next_token` parameter, the `group_limit` parameter also needs to be present. If a rule group that coincides with the next token is removed while you are paginating over the rule groups, a response with status code 400 will be returned.
+
+```bash
+curl http://localhost:9090/api/v1/rules
+```
 
 ```json
-$ curl http://localhost:9090/api/v1/rules
-
 {
     "data": {
         "groups": [
@@ -761,9 +893,11 @@ guarantees as the overarching API v1.
 GET /api/v1/alerts
 ```
 
-```json
-$ curl http://localhost:9090/api/v1/alerts
+```bash
+curl http://localhost:9090/api/v1/alerts
+```
 
+```json
 {
     "data": {
         "alerts": [
@@ -808,6 +942,9 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
     --data-urlencode 'metric=go_goroutines' \
     --data-urlencode 'match_target={job="prometheus"}' \
     --data-urlencode 'limit=2'
+```
+
+```json
 {
   "status": "success",
   "data": [
@@ -834,11 +971,14 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
 ```
 
 The following example returns metadata for all metrics for all targets with
-label `instance="127.0.0.1:9090`.
+label `instance="127.0.0.1:9090"`.
 
-```json
+```bash
 curl -G http://localhost:9091/api/v1/targets/metadata \
     --data-urlencode 'match_target={instance="127.0.0.1:9090"}'
+```
+
+```json
 {
   "status": "success",
   "data": [
@@ -887,9 +1027,11 @@ The `data` section of the query result consists of an object where each key is a
 
 The following example returns two metrics. Note that the metric `http_requests_total` has more than one object in the list. At least one target has a value for `HELP` that does not match the rest.
 
-```json
+```bash
 curl -G http://localhost:9090/api/v1/metadata?limit=2
+```
 
+```json
 {
   "status": "success",
   "data": {
@@ -918,9 +1060,11 @@ curl -G http://localhost:9090/api/v1/metadata?limit=2
 
 The following example returns only one metadata entry for each metric.
 
-```json
+```bash
 curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1
+```
 
+```json
 {
   "status": "success",
   "data": {
@@ -944,9 +1088,11 @@ curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1
 
 The following example returns metadata only for the metric `http_requests_total`.
 
-```json
+```bash
 curl -G http://localhost:9090/api/v1/metadata?metric=http_requests_total
+```
 
+```json
 {
   "status": "success",
   "data": {
@@ -977,8 +1123,11 @@ GET /api/v1/alertmanagers
 
 Both the active and dropped Alertmanagers are part of the response.
 
+```bash
+curl http://localhost:9090/api/v1/alertmanagers
+```
+
 ```json
-$ curl http://localhost:9090/api/v1/alertmanagers
 {
   "status": "success",
   "data": {
@@ -1011,8 +1160,11 @@ GET /api/v1/status/config
 
 The config is returned as a dumped YAML file. Due to a limitation of the YAML
 library, YAML comments are not included.
 
+```bash
+curl http://localhost:9090/api/v1/status/config
+```
+
 ```json
-$ curl http://localhost:9090/api/v1/status/config
 {
   "status": "success",
   "data": {
@@ -1031,8 +1183,11 @@ GET /api/v1/status/flags
 
 All values are of the result type `string`.
 
+```bash
+curl http://localhost:9090/api/v1/status/flags
+```
+
 ```json
-$ curl http://localhost:9090/api/v1/status/flags
 {
   "status": "success",
  "data": {
@@ -1058,13 +1213,18 @@ GET /api/v1/status/runtimeinfo
 
 The returned values are of different types, depending on the nature of the runtime property.
 
+```bash
+curl http://localhost:9090/api/v1/status/runtimeinfo
+```
+
 ```json
-$ curl http://localhost:9090/api/v1/status/runtimeinfo
 {
   "status": "success",
   "data": {
     "startTime": "2019-11-02T17:23:59.301361365+01:00",
     "CWD": "/",
+    "hostname" : "DESKTOP-717H17Q",
+    "serverTime": "2025-01-05T18:27:33Z",
    "reloadConfigSuccess": true,
     "lastConfigTime": "2019-11-02T17:23:59+01:00",
     "timeSeriesCount": 873,
@@ -1092,8 +1252,11 @@ GET /api/v1/status/buildinfo
 
 All values are of the result type `string`.
 
+```bash
+curl http://localhost:9090/api/v1/status/buildinfo
+```
+
 ```json
-$ curl http://localhost:9090/api/v1/status/buildinfo
 {
   "status": "success",
   "data": {
@@ -1119,9 +1282,11 @@ The following endpoint returns various cardinality statistics about the Promethe
 GET /api/v1/status/tsdb
 ```
 URL query parameters:
+
 - `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
 
-The `data` section of the query result consists of
+The `data` section of the query result consists of:
+
 - **headStats**: This provides the following data about the head block of the TSDB:
   - **numSeries**: The number of series.
   - **chunkCount**: The number of chunks.
@@ -1132,8 +1297,11 @@ The `data` section of the query result consists of
 - **memoryInBytesByLabelName** This will provide a list of the label names and memory used in bytes. Memory usage is calculated by adding the length of all values for a given label name.
 - **seriesCountByLabelPair** This will provide a list of label value pairs and their series count.
 
+```bash
+curl http://localhost:9090/api/v1/status/tsdb
+```
+
 ```json
-$ curl http://localhost:9090/api/v1/status/tsdb
 {
   "status": "success",
   "data": {
@@ -1197,16 +1365,19 @@ The following endpoint returns information about the WAL replay:
 GET /api/v1/status/walreplay
 ```
 
-**read**: The number of segments replayed so far.
-**total**: The total number segments needed to be replayed.
-**progress**: The progress of the replay (0 - 100%).
-**state**: The state of the replay. Possible states:
-- **waiting**: Waiting for the replay to start.
-- **in progress**: The replay is in progress.
-- **done**: The replay has finished.
+- **read**: The number of segments replayed so far.
+- **total**: The total number of segments that need to be replayed.
+- **progress**: The progress of the replay (0 - 100%).
+- **state**: The state of the replay. Possible states:
+  - **waiting**: Waiting for the replay to start.
+  - **in progress**: The replay is in progress.
+  - **done**: The replay has finished.
+
+```bash
+curl http://localhost:9090/api/v1/status/walreplay
+```
 
 ```json
-$ curl http://localhost:9090/api/v1/status/walreplay
 {
   "status": "success",
   "data": {
@@ -1238,8 +1409,11 @@ URL query parameters:
 
 - `skip_head=<bool>`: Skip data present in the head block. Optional.
 
+```bash
+curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot
+```
+
 ```json
-$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot
 {
   "status": "success",
   "data": {
@@ -1271,8 +1445,8 @@ Not mentioning both start and end times would clear all the data for the matched
 
 Example:
 
-```json
-$ curl -X POST \
+```bash
+curl -X POST \
   -g 'http://localhost:9090/api/v1/admin/tsdb/delete_series?match[]=up&match[]=process_start_time_seconds{job="prometheus"}'
 ```
 
@@ -1292,8 +1466,8 @@ PUT /api/v1/admin/tsdb/clean_tombstones
 
 This takes no parameters or body.
 
-```json
-$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones
+```bash
+curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones
 ```
 
 *New in v2.1 and supports PUT from v2.9*
@@ -1319,8 +1493,89 @@ is not considered an efficient way of ingesting samples. Use it
 with caution for specific low-volume use cases. It is not suitable for
 replacing the ingestion via scraping.
 
-Enable the OTLP receiver by the feature flag
-`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver
+Enable the OTLP receiver by setting
+`--web.enable-otlp-receiver`. When enabled, the OTLP receiver
 endpoint is `/api/v1/otlp/v1/metrics`.
 
 *New in v2.47*
+
+### OTLP Delta
+
+Prometheus can convert incoming metrics from delta temporality to their cumulative equivalent.
+This is done using [deltatocumulative](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor) from the OpenTelemetry Collector.
+
+To enable, pass `--enable-feature=otlp-deltatocumulative`.
+
+*New in v3.2*
+
+## Notifications
+
+The following endpoints provide information about active status notifications concerning the Prometheus server itself.
+Notifications are used in the web UI.
+
+These endpoints are **experimental**. They may change in the future.
+
+### Active Notifications
+
+The `/api/v1/notifications` endpoint returns a list of all currently active notifications.
+
+```
+GET /api/v1/notifications
+```
+
+Example:
+
+```bash
+curl http://localhost:9090/api/v1/notifications
+```
+
+```
+{
+  "status": "success",
+  "data": [
+    {
+      "text": "Prometheus is shutting down and gracefully stopping all operations.",
+      "date": "2024-10-07T12:33:08.551376578+02:00",
+      "active": true
+    }
+  ]
+}
+```
+
+*New in v3.0*
+
+### Live Notifications
+
+The `/api/v1/notifications/live` endpoint streams live notifications as they occur, using [Server-Sent Events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events). Deleted notifications are sent with `active: false`. Active notifications will be sent when connecting to the endpoint.
+
+```
+GET /api/v1/notifications/live
+```
+
+Example:
+
+```bash
+curl http://localhost:9090/api/v1/notifications/live
+```
+
+```
+data: {
+  "status": "success",
+  "data": [
+    {
+      "text": "Prometheus is shutting down and gracefully stopping all operations.",
+      "date": "2024-10-07T12:33:08.551376578+02:00",
+      "active": true
+    }
+  ]
+}
+```
+
+**Note:** The `/notifications/live` endpoint will return a `204 No Content` response if the maximum number of subscribers has been reached. You can set the maximum number of listeners with the flag `--web.max-notifications-subscribers`, which defaults to 16.
+
+```
+GET /api/v1/notifications/live
+204 No Content
+```
+
+*New in v3.0*
diff --git a/docs/querying/basics.md b/docs/querying/basics.md
index 1c72adb3e5..ba915854cb 100644
--- a/docs/querying/basics.md
+++ b/docs/querying/basics.md
@@ -4,13 +4,17 @@ nav_title: Basics
 sort_rank: 1
 ---
 
-# Querying Prometheus
-
 Prometheus provides a functional query language called PromQL (Prometheus
 Query Language) that lets the user select and aggregate time series data in real
-time. The result of an expression can either be shown as a graph, viewed as
-tabular data in Prometheus's expression browser, or consumed by external
-systems via the [HTTP API](api.md).
+time.
+
+When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
+or a _range query_, evaluated at equally-spaced steps between a start and an end time. PromQL works exactly the same
+in each case; the range query is just like an instant query run multiple times at different timestamps.
+
+In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
+
+Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).
 
 ## Examples
 
@@ -29,18 +33,31 @@ evaluate to one of four types:
 
 Depending on the use-case (e.g. when graphing vs. displaying the output of an
 expression), only some of these types are legal as the result of a
-user-specified expression. For example, an expression that returns an instant
-vector is the only type which can be graphed.
+user-specified expression.
+For [instant queries](api.md#instant-queries), any of the above data types are allowed as the root of the expression.
+[Range queries](api.md#range-queries) only support scalar-typed and instant-vector-typed expressions.
 
 _Notes about the experimental native histograms:_
 
 * Ingesting native histograms has to be enabled via a [feature
-  flag](../../feature_flags.md#native-histograms).
+  flag](../feature_flags.md#native-histograms).
 * Once native histograms have been ingested into the TSDB (and even after
   disabling the feature flag again), both instant vectors and range vectors
   may now contain samples that aren't simple floating point numbers (float
   samples) but complete histograms (histogram samples).
A vector may contain a mix of - float samples and histogram samples. + float samples and histogram samples. Note that the term “histogram sample” in + the PromQL documentation always refers to a native histogram. Classic + histograms are broken up into a number of series of float samples. From the + perspective of PromQL, there are no “classic histogram samples”. +* Like float samples, histogram samples can be counters or gauges, also called + counter histograms or gauge histograms, respectively. +* Native histograms can have different bucket layouts, but they are generally + convertible to compatible versions to apply binary and aggregation operations + to them. This is not true for all bucketing schemas. If incompatible + histograms are encountered in an operation, the corresponding output vector + element is removed from the result, flagged with a warn-level annotation. + More details can be found in the + [native histogram specification](https://prometheus.io/docs/specs/native_histograms/#compatibility-between-histograms). ## Literals @@ -62,9 +79,10 @@ Example: 'these are unescaped: \n \\ \t' `these are not unescaped: \n ' " \t` -### Float literals +### Float literals and time durations -Scalar float values can be written as literal integer or floating-point numbers in the format (whitespace only included for better readability): +Scalar float values can be written as literal integer or floating-point numbers +in the format (whitespace only included for better readability): [-+]?( [0-9]*\.?[0-9]+([eE][-+]?[0-9]+)? @@ -81,22 +99,57 @@ Examples: 0x8f -Inf NaN - -As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change. +Additionally, underscores (`_`) can be used in between decimal or hexadecimal +digits to improve readability. Examples: - 1s # Equivalent to 1.0 - 2m # Equivalent to 120.0 - 1ms # Equivalent to 0.001 - + 1_000_000 + .123_456_789 + 0x_53_AB_F3_82 + +Float literals are also used to specify durations in seconds. For convenience, +decimal integer numbers may be combined with the following +time units: + +* `ms` – milliseconds +* `s` – seconds – 1s equals 1000ms +* `m` – minutes – 1m equals 60s (ignoring leap seconds) +* `h` – hours – 1h equals 60m +* `d` – days – 1d equals 24h (ignoring so-called daylight saving time) +* `w` – weeks – 1w equals 7d +* `y` – years – 1y equals 365d (ignoring leap days) + +Suffixing a decimal integer number with one of the units above is a different +representation of the equivalent number of seconds as a bare float literal. + +Examples: + + 1s # Equivalent to 1. + 2m # Equivalent to 120. + 1ms # Equivalent to 0.001. + -2h # Equivalent to -7200. + +The following examples do _not_ work: + + 0xABm # No suffixing of hexadecimal numbers. + 1.5h # Time units cannot be combined with a floating point. + +Infd # No suffixing of ±Inf or NaN. + +Multiple units can be combined by concatenation of suffixed integers. Units +must be ordered from the longest to the shortest. A given unit must only appear +once per float literal. + +Examples: + + 1h30m # Equivalent to 5400s and thus 5400. + 12h34m56s # Equivalent to 45296s and thus 45296. + 54s321ms # Equivalent to 54.321. ## Time series selectors -Time series selectors are responsible for selecting the times series and raw or inferred sample timestamps and values. 
- -Time series *selectors* are not to be confused with higher level concept of instant and range *queries* that can execute the time series *selectors*. A higher level instant query would evaluate the given selector at one point in time, however the range query would evaluate the selector at multiple different times in between a minimum and maximum timestamp at regular steps. +These are the basic building-blocks that instruct PromQL what data to fetch. ### Instant vector selectors @@ -105,8 +158,16 @@ single sample value for each at a given timestamp (point in time). In the simpl form, only a metric name is specified, which results in an instant vector containing elements for all time series that have this metric name. +The value returned will be that of the most recent sample at or before the +query's evaluation timestamp (in the case of an +[instant query](api.md#instant-queries)) +or the current step within the query (in the case of a +[range query](api.md#range-queries)). +The [`@` modifier](#modifier) allows overriding the timestamp relative to which +the selection takes place. Time series are only returned if their most recent sample is less than the [lookback period](#staleness) ago. + This example selects all time series that have the `http_requests_total` metric -name: +name, returning the most recent sample for each: http_requests_total @@ -127,7 +188,7 @@ against regular expressions. The following label matching operators exist: * `=~`: Select labels that regex-match the provided string. * `!~`: Select labels that do not regex-match the provided string. -Regex matches are fully anchored. A match of `env=~"foo"` is treated as `env=~"^foo$"`. +[Regex](#regular-expressions) matches are fully anchored. A match of `env=~"foo"` is treated as `env=~"^foo$"`. For example, this selects all `http_requests_total` time series for `staging`, `testing`, and `development` environments and HTTP methods other than `GET`. @@ -154,7 +215,7 @@ and would exclude: http_requests_total{environment="development"} -Multiple matchers can be used for the same label name; they all must pass for a result to be returned. +Multiple matchers can be used for the same label name; they all must pass for a result to be returned. The query: @@ -190,59 +251,25 @@ A workaround for this restriction is to use the `__name__` label: {__name__="on"} # Good! -All regular expressions in Prometheus use [RE2 -syntax](https://github.com/google/re2/wiki/Syntax). - ### Range Vector Selectors Range vector literals work like instant vector literals, except that they -select a range of samples back from the current instant. Syntactically, a [time -duration](#time-durations) is appended in square brackets (`[]`) at the end of -a vector selector to specify how far back in time values should be fetched for -each resulting range vector element. The range is a closed interval, -i.e. samples with timestamps coinciding with either boundary of the range are -still included in the selection. +select a range of samples back from the current instant. Syntactically, a +[float literal](#float-literals-and-time-durations) is appended in square +brackets (`[]`) at the end of a vector selector to specify for how many seconds +back in time values should be fetched for each resulting range vector element. +Commonly, the float literal uses the syntax with one or more time units, e.g. +`[5m]`. The range is a left-open and right-closed interval, i.e. 
samples with
+timestamps coinciding with the left boundary of the range are excluded from the
+selection, while samples coinciding with the right boundary of the range are
+included in the selection.
 
-In this example, we select all the values we have recorded within the last 5
-minutes for all time series that have the metric name `http_requests_total` and
-a `job` label set to `prometheus`:
+In this example, we select all the values recorded less than 5m ago for all
+time series that have the metric name `http_requests_total` and a `job` label
+set to `prometheus`:
 
     http_requests_total{job="prometheus"}[5m]
 
-### Time Durations
-
-Time durations are specified as a number, followed immediately by one of the
-following units:
-
-* `ms` - milliseconds
-* `s` - seconds
-* `m` - minutes
-* `h` - hours
-* `d` - days - assuming a day always has 24h
-* `w` - weeks - assuming a week always has 7d
-* `y` - years - assuming a year always has 365d<sup>1</sup>
-
-<sup>1</sup> For days in a year, the leap day is ignored, and conversely, for a minute, a leap second is ignored.
-
-Time durations can be combined by concatenation. Units must be ordered from the
-longest to the shortest. A given unit must only appear once in a time duration.
-
-Here are some examples of valid time durations:
-
-    5h
-    1h30m
-    5m
-    10s
-
-
-As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change.
-
-Examples:
-
-    1.0 # Equivalent to 1s
-    0.001 # Equivalent to 1ms
-    120 # Equivalent to 2m
-
 ### Offset modifier
 
 The `offset` modifier allows changing the time offset for individual
@@ -278,7 +305,7 @@ Note that this allows a query to look ahead of its evaluation time.
 
 The `@` modifier allows changing the evaluation time for individual instant
 and range vectors in a query. The time supplied to the `@` modifier
-is a unix timestamp and described with a float literal. 
+is a unix timestamp and described with a float literal.
 
 For example, the following expression returns the value of
 `http_requests_total` at `2021-01-04T07:40:00+00:00`:
@@ -325,7 +352,7 @@ Note that the `@` modifier allows a query to look ahead of its evaluation time.
 
 Subquery allows you to run an instant query for a given range and resolution.
 The result of a subquery is a range vector.
 
-Syntax: `<instant_query> '[' <range> ':' [<resolution>] ']' [ @ <float_literal> ] [ offset <duration> ]`
+Syntax: `<instant_query> '[' <range> ':' [<resolution>] ']' [ @ <float_literal> ] [ offset <float_literal> ]`
 
 * `<resolution>` is optional. Default is the global evaluation interval.
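+
+For example, the following subquery returns the 5-minute rate of the
+`http_requests_total` metric for the past 30 minutes, at a resolution of
+1 minute:
+
+    rate(http_requests_total[5m])[30m:1m]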
 
@@ -345,6 +372,12 @@ PromQL supports line comments that start with `#`. Example:
 
     # This is a comment
 
+## Regular expressions
+
+All regular expressions in Prometheus use [RE2 syntax](https://github.com/google/re2/wiki/Syntax).
+
+Regex matches are always fully anchored.
+
 ## Gotchas
 
 ### Staleness
 
@@ -354,8 +387,9 @@ independently of the actual present time series data. This is mainly to support
 cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
 time series do not precisely align in time. Because of their independence,
 Prometheus needs to assign a value at those timestamps for each relevant time
-series. It does so by taking the newest sample before this timestamp within the lookback period.
-The lookback period is 5 minutes by default.
+series. It does so by taking the newest sample that is less than the lookback period ago.
+The lookback period is 5 minutes by default, but can be
+[set with the `--query.lookback-delta` flag](../command-line/prometheus.md).
 
 If a target scrape or rule evaluation no longer returns a sample for a time
 series that was previously present, this time series will be marked as stale.
@@ -367,11 +401,11 @@ as stale, then no value is returned for that time series. If new samples are
 subsequently ingested for that time series, they will be returned as expected.
 
 A time series will go stale when it is no longer exported, or the target no
-longer exists. Such time series will disappear from graphs
+longer exists. Such time series will disappear from graphs
 at the times of their latest collected sample, and they will not be returned
 in queries after they are marked stale.
 
-Some exporters, which put their own timestamps on samples, get a different behaviour: 
+Some exporters, which put their own timestamps on samples, get a different behaviour:
 series that stop being exported take the last value for (by default) 5 minutes before
 disappearing. The `track_timestamps_staleness` setting can change this.
 
diff --git a/docs/querying/examples.md b/docs/querying/examples.md
index 8287ff6f62..a732334a95 100644
--- a/docs/querying/examples.md
+++ b/docs/querying/examples.md
@@ -1,11 +1,9 @@
 ---
-title: Querying examples
+title: Query examples
 nav_title: Examples
 sort_rank: 4
 ---
 
-# Query examples
-
 ## Simple time series selection
 
 Return all time series with the metric `http_requests_total`:
@@ -25,14 +23,11 @@ for the same vector, making it a [range vector](../basics/#range-vector-selector
 Note that an expression resulting in a range vector cannot be graphed directly,
 but viewed in the tabular ("Console") view of the expression browser.
 
-Using regular expressions, you could select time series only for jobs whose
+Using [regular expressions](./basics.md#regular-expressions), you could select time series only for jobs whose
 name matches a certain pattern, in this case, all jobs that end with `server`:
 
     http_requests_total{job=~".*server"}
 
-All regular expressions in Prometheus use [RE2
-syntax](https://github.com/google/re2/wiki/Syntax).
-
 To select all HTTP status codes except 4xx ones, you could run:
 
     http_requests_total{status!~"4.."}
diff --git a/docs/querying/functions.md b/docs/querying/functions.md
index de65e693d0..d274de0b72 100644
--- a/docs/querying/functions.md
+++ b/docs/querying/functions.md
@@ -4,37 +4,22 @@ nav_title: Functions
 sort_rank: 3
 ---
 
-# Functions
-
 Some functions have default arguments, e.g. `year(v=vector(time())
 instant-vector)`. This means that there is one argument `v` which is an
 instant vector, which, if not provided, defaults to the value of the
 expression `vector(time())`.
 
-_Notes about the experimental native histograms:_
-
-* Ingesting native histograms has to be enabled via a [feature
-  flag](../feature_flags.md#native-histograms). As long as no native histograms
-  have been ingested into the TSDB, all functions will behave as usual.
-* Functions that do not explicitly mention native histograms in their
-  documentation (see below) will ignore histogram samples.
-* Functions that do already act on native histograms might still change their
-  behavior in the future.
-* If a function requires the same bucket layout between multiple native
-  histograms it acts on, it will automatically convert them
-  appropriately. (With the currently supported bucket schemas, that's always
-  possible.)
-
 
 ## `abs()`
 
-`abs(v instant-vector)` returns the input vector with all sample values converted to
-their absolute value.
+`abs(v instant-vector)` returns a vector containing all float samples in the
+input vector converted to their absolute value. Histogram samples in the input
+vector are ignored silently.
 
 ## `absent()`
 
 `absent(v instant-vector)` returns an empty vector if the vector passed to it
-has any elements (floats or native histograms) and a 1-element vector with the
-value 1 if the vector passed to it has no elements.
+has any elements (float samples or histogram samples) and a 1-element vector
+with the value 1 if the vector passed to it has no elements.
 
 This is useful for alerting on when no time series exist for a given metric
 name and label combination.
@@ -56,8 +41,9 @@ of the 1-element output vector from the input vector.
 ## `absent_over_time()`
 
 `absent_over_time(v range-vector)` returns an empty vector if the range vector
-passed to it has any elements (floats or native histograms) and a 1-element
-vector with the value 1 if the range vector passed to it has no elements.
+passed to it has any elements (float samples or histogram samples) and a
+1-element vector with the value 1 if the range vector passed to it has no
+elements.
 
 This is useful for alerting on when no time series exist for a given metric
 name and label combination for a certain amount of time.
@@ -78,8 +64,9 @@ labels of the 1-element output vector from the input vector.
 ## `ceil()`
 
-`ceil(v instant-vector)` rounds the sample values of all elements in `v` up to
-the nearest integer value greater than or equal to v.
+`ceil(v instant-vector)` returns a vector containing all float samples in the
+input vector rounded up to the nearest integer value greater than or equal to
+their original value. Histogram samples in the input vector are ignored silently.
 
 * `ceil(+Inf) = +Inf`
 * `ceil(±0) = ±0`
@@ -90,49 +77,62 @@ the nearest integer value greater than or equal to v.
 
 For each input time series, `changes(v range-vector)` returns the number of
 times its value has changed within the provided time range as an instant
-vector.
+vector. A float sample followed by a histogram sample, or vice versa, counts as
+a change. A counter histogram sample followed by a gauge histogram sample with
+otherwise exactly the same values, or vice versa, does not count as a change.
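+
+For example, the following expression counts, per series, how often the value
+of the (purely illustrative) metric `process_start_time_seconds` changed
+within the last hour, which roughly corresponds to the number of process
+restarts:
+
+```
+# process_start_time_seconds is just an illustrative metric name here.
+changes(process_start_time_seconds{job="prometheus"}[1h])
+```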
 
 ## `clamp()`
 
-`clamp(v instant-vector, min scalar, max scalar)`
-clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.
+`clamp(v instant-vector, min scalar, max scalar)` clamps the values of all
+float samples in `v` to have a lower limit of `min` and an upper limit of
+`max`. Histogram samples in the input vector are ignored silently.
 
 Special cases:
 
 * Return an empty vector if `min > max`
-* Return `NaN` if `min` or `max` is `NaN`
+* Float samples are clamped to `NaN` if `min` or `max` is `NaN`
 
 ## `clamp_max()`
 
-`clamp_max(v instant-vector, max scalar)` clamps the sample values of all
-elements in `v` to have an upper limit of `max`.
+`clamp_max(v instant-vector, max scalar)` clamps the values of all float
+samples in `v` to have an upper limit of `max`. Histogram samples in the input
+vector are ignored silently.
 
 ## `clamp_min()`
 
-`clamp_min(v instant-vector, min scalar)` clamps the sample values of all
-elements in `v` to have a lower limit of `min`.
+`clamp_min(v instant-vector, min scalar)` clamps the values of all float
+samples in `v` to have a lower limit of `min`. Histogram samples in the input
+vector are ignored silently.
 
 ## `day_of_month()`
 
-`day_of_month(v=vector(time()) instant-vector)` returns the day of the month
-for each of the given times in UTC. Returned values are from 1 to 31.
+`day_of_month(v=vector(time()) instant-vector)` interprets float samples in
+`v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the
+day of the month (in UTC) for each of those timestamps. Returned values are
+from 1 to 31. Histogram samples in the input vector are ignored silently.
 
 ## `day_of_week()`
 
-`day_of_week(v=vector(time()) instant-vector)` returns the day of the week for
-each of the given times in UTC. Returned values are from 0 to 6, where 0 means
-Sunday etc.
+`day_of_week(v=vector(time()) instant-vector)` interprets float samples in `v`
+as timestamps (number of seconds since January 1, 1970 UTC) and returns the day
+of the week (in UTC) for each of those timestamps. Returned values are from 0
+to 6, where 0 means Sunday etc. Histogram samples in the input vector are
+ignored silently.
 
 ## `day_of_year()`
 
-`day_of_year(v=vector(time()) instant-vector)` returns the day of the year for
-each of the given times in UTC. Returned values are from 1 to 365 for non-leap years,
-and 1 to 366 in leap years.
+`day_of_year(v=vector(time()) instant-vector)` interprets float samples in `v`
+as timestamps (number of seconds since January 1, 1970 UTC) and returns the day
+of the year (in UTC) for each of those timestamps. Returned values are from 1
+to 365 for non-leap years, and 1 to 366 in leap years. Histogram samples in the
+input vector are ignored silently.
 
 ## `days_in_month()`
 
-`days_in_month(v=vector(time()) instant-vector)` returns number of days in the
-month for each of the given times in UTC. Returned values are from 28 to 31.
+`days_in_month(v=vector(time()) instant-vector)` interprets float samples in
+`v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the
+number of days in the month of each of those timestamps (in UTC). Returned
+values are from 28 to 31. Histogram samples in the input vector are ignored silently.
 
 ## `delta()`
 
@@ -150,36 +150,67 @@ between now and 2 hours ago:
 
 ```
 delta(cpu_temp_celsius{host="zeus"}[2h])
 ```
 
-`delta` acts on native histograms by calculating a new histogram where each
+`delta` acts on histogram samples by calculating a new histogram where each
 component (sum and count of observations, buckets) is the difference between
-the respective component in the first and last native histogram in
-`v`. However, each element in `v` that contains a mix of float and native
-histogram samples within the range, will be missing from the result vector.
+the respective component in the first and last native histogram in `v`.
+However, each element in `v` that contains a mix of float samples and histogram
+samples within the range will be omitted from the result vector, flagged by a
+warn-level annotation.
 
-`delta` should only be used with gauges and native histograms where the
-components behave like gauges (so-called gauge histograms).
+`delta` should only be used with gauges (for both floats and histograms).
 
 ## `deriv()`
 
-`deriv(v range-vector)` calculates the per-second derivative of the time series in a range
-vector `v`, using [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
-The range vector must have at least two samples in order to perform the calculation. When `+Inf` or
-`-Inf` are found in the range vector, the slope and offset value calculated will be `NaN`.
+`deriv(v range-vector)` calculates the per-second derivative of each float time
+series in the range vector `v`, using [simple linear
+regression](https://en.wikipedia.org/wiki/Simple_linear_regression). The range
+vector must have at least two float samples in order to perform the
+calculation. When `+Inf` or `-Inf` are found in the range vector, the slope and
+offset value calculated will be `NaN`.
 
-`deriv` should only be used with gauges.
+`deriv` should only be used with gauges and only works for float samples.
+Elements in the range vector that contain only histogram samples are ignored
+entirely. For elements that contain a mix of float and histogram samples, only
+the float samples are used as input, which is flagged by an info-level
+annotation.
+
+## `double_exponential_smoothing()`
+
+**This function has to be enabled via the [feature
+flag](../feature_flags.md#experimental-promql-functions)
+`--enable-feature=promql-experimental-functions`.**
+
+`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a
+smoothed value for each float time series in the range in `v`. The lower the
+smoothing factor `sf`, the more importance is given to old data. The higher the
+trend factor `tf`, the more trends in the data are considered. Both `sf` and
+`tf` must be between 0 and 1. For additional details, refer to [NIST
+Engineering Statistics
+Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm). In
+Prometheus V2 this function was called `holt_winters`. This caused confusion
+since the Holt-Winters method usually refers to triple exponential smoothing.
+Double exponential smoothing as implemented here is also referred to as "Holt
+Linear".
+
+`double_exponential_smoothing` should only be used with gauges and only works
+for float samples. Elements in the range vector that contain only histogram
+samples are ignored entirely. For elements that contain a mix of float and
+histogram samples, only the float samples are used as input, which is flagged
+by an info-level annotation.
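+
+For example, assuming a gauge metric named `node_load1` exists, the following
+expression produces a smoothed value from the samples of the last 10 minutes,
+with a smoothing factor of 0.5 and a trend factor of 0.5:
+
+```
+# node_load1 is an assumed example gauge.
+double_exponential_smoothing(node_load1[10m], 0.5, 0.5)
+```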
 
 ## `exp()`
 
-`exp(v instant-vector)` calculates the exponential function for all elements in `v`.
-Special cases are:
+`exp(v instant-vector)` calculates the exponential function for all float
+samples in `v`. Histogram samples are ignored silently. Special cases are:
 
 * `Exp(+Inf) = +Inf`
 * `Exp(NaN) = NaN`
 
 ## `floor()`
 
-`floor(v instant-vector)` rounds the sample values of all elements in `v` down
-to the nearest integer value smaller than or equal to v.
+`floor(v instant-vector)` returns a vector containing all float samples in the
+input vector rounded down to the nearest integer value smaller than or equal
+to their original value. Histogram samples in the input vector are ignored silently.
 
 * `floor(+Inf) = +Inf`
 * `floor(±0) = ±0`
@@ -188,12 +219,8 @@ to the nearest integer value smaller than or equal to v.
 
 ## `histogram_avg()`
 
-_This function only acts on native histograms, which are an experimental
-feature. The behavior of this function may change in future versions of
-Prometheus, including its removal from PromQL._
-
-`histogram_avg(v instant-vector)` returns the arithmetic average of observed values stored in
-a native histogram. Samples that are not native histograms are ignored and do
+`histogram_avg(v instant-vector)` returns the arithmetic average of observed
+values stored in each histogram sample in `v`.
Float samples are ignored and do
+not show up in the returned vector.
 
 Use `histogram_avg` as demonstrated below to compute the average request duration
@@ -209,36 +236,42 @@ Which is equivalent to the following query:
 
 ## `histogram_count()` and `histogram_sum()`
 
-_Both functions only act on native histograms, which are an experimental
-feature. The behavior of these functions may change in future versions of
-Prometheus, including their removal from PromQL._
-
 `histogram_count(v instant-vector)` returns the count of observations stored in
-a native histogram. Samples that are not native histograms are ignored and do
-not show up in the returned vector.
+each histogram sample in `v`. Float samples are ignored and do not show up in
+the returned vector.
 
 Similarly, `histogram_sum(v instant-vector)` returns the sum of observations
-stored in a native histogram.
+stored in each histogram sample.
 
 Use `histogram_count` in the following way to calculate a rate of observations
-(in this case corresponding to “requests per second”) from a native histogram:
+(in this case corresponding to “requests per second”) from a series of
+histogram samples:
 
     histogram_count(rate(http_request_duration_seconds[10m]))
 
 ## `histogram_fraction()`
 
-_This function only acts on native histograms, which are an experimental
-feature. The behavior of this function may change in future versions of
-Prometheus, including its removal from PromQL._
+`histogram_fraction(lower scalar, upper scalar, b instant-vector)` returns the
+estimated fraction of observations between the provided lower and upper values
+for each classic or native histogram contained in `b`. Float samples in `b` are
+considered the counts of observations in each bucket of one or more classic
+histograms, while native histogram samples in `b` are treated each individually
+as a separate histogram. This works in the same way as for `histogram_quantile()`.
+(See there for more details.)
 
-For a native histogram, `histogram_fraction(lower scalar, upper scalar, v
-instant-vector)` returns the estimated fraction of observations between the
-provided lower and upper values. Samples that are not native histograms are
-ignored and do not show up in the returned vector.
+If the provided lower and upper values do not coincide with bucket boundaries,
+the calculated fraction is an estimate, using the same interpolation method as for
+`histogram_quantile()`. (See there for more details.) Especially with classic
+histograms, it is easy to accidentally pick lower or upper values that are very
+far away from any bucket boundary, leading to large margins of error. Rather than
+using `histogram_fraction()` with classic histograms, it is often a more robust approach
+to directly act on the bucket series when calculating fractions. See the
+[calculation of the Apdex score](https://prometheus.io/docs/practices/histograms/#apdex-score)
+as a typical example.
 
 For example, the following expression calculates the fraction of HTTP requests
 over the last hour that took 200ms or less:
-
+
     histogram_fraction(0, 0.2, rate(http_request_duration_seconds[1h]))
 
 The error of the estimation depends on the resolution of the underlying native
@@ -253,12 +286,17 @@ observations less than or equal 0.2 would be `-Inf` rather than `0`. Whether
 the provided boundaries are inclusive or exclusive is only relevant if the
 provided boundaries are precisely aligned with bucket boundaries in the
 underlying native histogram. In this case, the behavior depends on the schema
-definition of the histogram.
The currently supported schemas all feature -inclusive upper boundaries and exclusive lower boundaries for positive values -(and vice versa for negative values). Without a precise alignment of -boundaries, the function uses linear interpolation to estimate the -fraction. With the resulting uncertainty, it becomes irrelevant if the -boundaries are inclusive or exclusive. +definition of the histogram. (The usual standard exponential schemas all +feature inclusive upper boundaries and exclusive lower boundaries for positive +values, and vice versa for negative values.) Without a precise alignment of +boundaries, the function uses interpolation to estimate the fraction. With the +resulting uncertainty, it becomes irrelevant if the boundaries are inclusive or +exclusive. + +Special case for native histograms with standard exponential buckets: +`NaN` observations are considered outside of any buckets in this case. +`histogram_fraction(-Inf, +Inf, b)` effectively returns the fraction of +non-`NaN` observations and may therefore be less than 1. ## `histogram_quantile()` @@ -270,10 +308,6 @@ summaries](https://prometheus.io/docs/practices/histograms) for a detailed explanation of φ-quantiles and the usage of the (classic) histogram metric type in general.) -_Note that native histograms are an experimental feature. The behavior of this -function when dealing with native histograms may change in future versions of -Prometheus._ - The float samples in `b` are considered the counts of observations in each bucket of one or more classic histograms. Each float sample must have a label `le` where the label value denotes the inclusive upper bound of the bucket. @@ -284,8 +318,8 @@ type](https://prometheus.io/docs/concepts/metric_types/#histogram) automatically provides time series with the `_bucket` suffix and the appropriate labels. -The native histogram samples in `b` are treated each individually as a separate -histogram to calculate the quantile from. +The (native) histogram samples in `b` are treated each individually as a +separate histogram to calculate the quantile from. As long as no naming collisions arise, `b` may contain a mix of classic and native histograms. @@ -313,7 +347,7 @@ included in the `by` clause. The following expression aggregates the 90th percentile by `job` for classic histograms: histogram_quantile(0.9, sum by (job, le) (rate(http_request_duration_seconds_bucket[10m]))) - + When aggregating native histograms, the expression simplifies to: histogram_quantile(0.9, sum by (job) (rate(http_request_duration_seconds[10m]))) @@ -326,92 +360,118 @@ With native histograms, aggregating everything works as usual without any `by` c histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m]))) -The `histogram_quantile()` function interpolates quantile values by -assuming a linear distribution within a bucket. +In the (common) case that a quantile value does not coincide with a bucket +boundary, the `histogram_quantile()` function interpolates the quantile value +within the bucket the quantile value falls into. For classic histograms, for +native histograms with custom bucket boundaries, and for the zero bucket of +other native histograms, it assumes a uniform distribution of observations +within the bucket (also called _linear interpolation_). 
For the +non-zero-buckets of native histograms with a standard exponential bucketing +schema, the interpolation is done under the assumption that the samples within +the bucket are distributed in a way that they would uniformly populate the +buckets in a hypothetical histogram with higher resolution. (This is also +called _exponential interpolation_. See the [native histogram +specification](https://prometheus.io/docs/specs/native_histograms/#interpolation-within-a-bucket) +for more details.) If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned. -The following is only relevant for classic histograms: If `b` contains -fewer than two buckets, `NaN` is returned. The highest bucket must have an -upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located -in the highest bucket, the upper bound of the second highest bucket is -returned. A lower limit of the lowest bucket is assumed to be 0 if the upper -bound of that bucket is greater than -0. In that case, the usual linear interpolation is applied within that -bucket. Otherwise, the upper bound of the lowest bucket is returned for -quantiles located in the lowest bucket. +Special cases for classic histograms: -You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in -a histogram. +* If `b` contains fewer than two buckets, `NaN` is returned. +* The highest bucket must have an upper bound of `+Inf`. (Otherwise, `NaN` is + returned.) +* If a quantile is located in the highest bucket, the upper bound of the second + highest bucket is returned. +* The lower limit of the lowest bucket is assumed to be 0 if the upper bound of + that bucket is greater than 0. In that case, the usual linear interpolation + is applied within that bucket. Otherwise, the upper bound of the lowest + bucket is returned for quantiles located in the lowest bucket. -You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in -a histogram. +Special cases for native histograms: -Buckets of classic histograms are cumulative. Therefore, the following should always be the case: +* If a native histogram with standard exponential buckets has `NaN` + observations and the quantile falls into one of the existing exponential + buckets, the result is skewed towards higher values due to `NaN` + observations treated as `+Inf`. This is flagged with an info level + annotation. +* If a native histogram with standard exponential buckets has `NaN` + observations and the quantile falls above all of the existing exponential + buckets, `NaN` is returned. This is flagged with an info level annotation. +* A zero bucket with finite width is assumed to contain no negative + observations if the histogram has observations in positive buckets, but none + in negative buckets. +* A zero bucket with finite width is assumed to contain no positive + observations if the histogram has observations in negative buckets, but none + in positive buckets. -* The counts in the buckets are monotonically increasing (strictly non-decreasing). -* A lack of observations between the upper limits of two consecutive buckets results in equal counts -in those two buckets. +You can use `histogram_quantile(0, v instant-vector)` to get the estimated +minimum value stored in a histogram. -However, floating point precision issues (e.g. 
small discrepancies introduced by computing of buckets -with `sum(rate(...))`) or invalid data might violate these assumptions. In that case, -`histogram_quantile` would be unable to return meaningful results. To mitigate the issue, -`histogram_quantile` assumes that tiny relative differences between consecutive buckets are happening -because of floating point precision errors and ignores them. (The threshold to ignore a difference -between two buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are -non-monotonic bucket counts even after this adjustment, they are increased to the value of the -previous buckets to enforce monotonicity. The latter is evidence for an actual issue with the input -data and is therefore flagged with an informational annotation reading `input to histogram_quantile -needed to be fixed for monotonicity`. If you encounter this annotation, you should find and remove -the source of the invalid data. +You can use `histogram_quantile(1, v instant-vector)` to get the estimated +maximum value stored in a histogram. + +Buckets of classic histograms are cumulative. Therefore, the following should +always be the case: + +* The counts in the buckets are monotonically increasing (strictly + non-decreasing). +* A lack of observations between the upper limits of two consecutive buckets + results in equal counts in those two buckets. + +However, floating point precision issues (e.g. small discrepancies introduced +by computing of buckets with `sum(rate(...))`) or invalid data might violate +these assumptions. In that case, `histogram_quantile` would be unable to return +meaningful results. To mitigate the issue, `histogram_quantile` assumes that +tiny relative differences between consecutive buckets are happening because of +floating point precision errors and ignores them. (The threshold to ignore a +difference between two buckets is a trillionth (1e-12) of the sum of both +buckets.) Furthermore, if there are non-monotonic bucket counts even after this +adjustment, they are increased to the value of the previous buckets to enforce +monotonicity. The latter is evidence for an actual issue with the input data +and is therefore flagged by an info-level annotation reading `input to +histogram_quantile needed to be fixed for monotonicity`. If you encounter this +annotation, you should find and remove the source of the invalid data. ## `histogram_stddev()` and `histogram_stdvar()` -_Both functions only act on native histograms, which are an experimental -feature. The behavior of these functions may change in future versions of -Prometheus, including their removal from PromQL._ - `histogram_stddev(v instant-vector)` returns the estimated standard deviation -of observations in a native histogram, based on the geometric mean of the buckets -where the observations lie. Samples that are not native histograms are ignored and -do not show up in the returned vector. +of observations for each histogram sample in `v`. For this estimation, all observations +in a bucket are assumed to have the value of the mean of the bucket boundaries. For +the zero bucket and for buckets with custom boundaries, the arithmetic mean is used. +For the usual exponential buckets, the geometric mean is used. Float samples are ignored +and do not show up in the returned vector. Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard -variance of observations in a native histogram. 
-
-## `holt_winters()`
-
-`holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value
-for time series based on the range in `v`. The lower the smoothing factor `sf`,
-the more importance is given to old data. The higher the trend factor `tf`, the
-more trends in the data is considered. Both `sf` and `tf` must be between 0 and
-1.
-
-`holt_winters` should only be used with gauges.
+variance of observations for each histogram sample in `v`.
 
 ## `hour()`
 
-`hour(v=vector(time()) instant-vector)` returns the hour of the day
-for each of the given times in UTC. Returned values are from 0 to 23.
+`hour(v=vector(time()) instant-vector)` interprets float samples in `v` as
+timestamps (number of seconds since January 1, 1970 UTC) and returns the hour
+of the day (in UTC) for each of those timestamps. Returned values are from 0
+to 23. Histogram samples in the input vector are ignored silently.
 
 ## `idelta()`
 
 `idelta(v range-vector)` calculates the difference between the last two samples
 in the range vector `v`, returning an instant vector with the given deltas and
-equivalent labels.
+equivalent labels. Both samples must be either float samples or histogram
+samples. Elements in `v` where one of the last two samples is a float sample
+and the other is a histogram sample will be omitted from the result vector,
+flagged by a warn-level annotation.
 
-`idelta` should only be used with gauges.
+`idelta` should only be used with gauges (for both floats and histograms).
 
 ## `increase()`
 
-`increase(v range-vector)` calculates the increase in the
-time series in the range vector. Breaks in monotonicity (such as counter
-resets due to target restarts) are automatically adjusted for. The
-increase is extrapolated to cover the full time range as specified
-in the range vector selector, so that it is possible to get a
-non-integer result even if a counter increases only by integer
-increments.
+`increase(v range-vector)` calculates the increase in the time series in the
+range vector. Breaks in monotonicity (such as counter resets due to target
+restarts) are automatically adjusted for. The increase is extrapolated to cover
+the full time range as specified in the range vector selector, so that it is
+possible to get a non-integer result even if a counter increases only by
+integer increments.
 
 The following example expression returns the number of HTTP requests as measured
 over the last 5 minutes, per time series in the range vector:
 
@@ -420,24 +480,121 @@ over the last 5 minutes, per time series in the range vector:
 increase(http_requests_total{job="api-server"}[5m])
 ```
 
-`increase` acts on native histograms by calculating a new histogram where each
-component (sum and count of observations, buckets) is the increase between
-the respective component in the first and last native histogram in
-`v`. However, each element in `v` that contains a mix of float and native
-histogram samples within the range, will be missing from the result vector.
+`increase` acts on histogram samples by calculating a new histogram where each
+component (sum and count of observations, buckets) is the increase between the
+respective component in the first and last native histogram in `v`. However,
+each element in `v` that contains a mix of float samples and histogram samples
+within the range, will be omitted from the result vector, flagged by a
+warn-level annotation.
 
-`increase` should only be used with counters and native histograms where the
-components behave like counters.
It is syntactic sugar for `rate(v)` multiplied -by the number of seconds under the specified time range window, and should be -used primarily for human readability. Use `rate` in recording rules so that -increases are tracked consistently on a per-second basis. +`increase` should only be used with counters (for both floats and histograms). +It is syntactic sugar for `rate(v)` multiplied by the number of seconds under +the specified time range window, and should be used primarily for human +readability. Use `rate` in recording rules so that increases are tracked +consistently on a per-second basis. + +## `info()` + +_The `info` function is an experiment to improve UX +around including labels from [info metrics](https://grafana.com/blog/2021/08/04/how-to-use-promql-joins-for-more-effective-queries-of-prometheus-metrics-at-scale/#info-metrics). +The behavior of this function may change in future versions of Prometheus, +including its removal from PromQL. `info` has to be enabled via the +[feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`._ + +`info(v instant-vector, [data-label-selector instant-vector])` finds, for each time +series in `v`, all info series with matching _identifying_ labels (more on +this later), and adds the union of their _data_ (i.e., non-identifying) labels +to the time series. The second argument `data-label-selector` is optional. +It is not a real instant vector, but uses a subset of its syntax. +It must start and end with curly braces (`{ ... }`) and may only contain label matchers. +The label matchers are used to constrain which info series to consider +and which data labels to add to `v`. + +Identifying labels of an info series are the subset of labels that uniquely +identify the info series. The remaining labels are considered +_data labels_ (also called non-identifying). (Note that Prometheus's concept +of time series identity always includes _all_ the labels. For the sake of the `info` +function, we “logically” define info series identity in a different way than +in the conventional Prometheus view.) The identifying labels of an info series +are used to join it to regular (non-info) series, i.e. those series that have +the same labels as the identifying labels of the info series. The data labels, which are +the ones added to the regular series by the `info` function, effectively encode +metadata key value pairs. (This implies that a change in the data labels +in the conventional Prometheus view constitutes the end of one info series and +the beginning of a new info series, while the “logical” view of the `info` function is +that the same info series continues to exist, just with different “data”.) + +The conventional approach of adding data labels is sometimes called a “join query”, +as illustrated by the following example: + +``` + rate(http_server_request_duration_seconds_count[2m]) +* on (job, instance) group_left (k8s_cluster_name) + target_info +``` + +The core of the query is the expression `rate(http_server_request_duration_seconds_count[2m])`. +But to add data labels from an info metric, the user has to use elaborate +(and not very obvious) syntax to specify which info metric to use (`target_info`), what the +identifying labels are (`on (job, instance)`), and which data labels to add +(`group_left (k8s_cluster_name)`). 
+
+This query is not only verbose and hard to write, it might also run into an “identity crisis”:
+If any of the data labels of `target_info` changes, Prometheus sees that as a change of series
+(as alluded to above, Prometheus just has no native concept of non-identifying labels).
+If the old `target_info` series is not properly marked as stale (which can happen with certain ingestion paths),
+the query above will fail for up to 5m (the lookback delta) because it will find a conflicting
+match with both the old and the new version of `target_info`.
+
+The `info` function not only resolves this conflict in favor of the newer series, it also simplifies the syntax
+because it knows about the available info series and what their identifying labels are. The example query
+looks like this with the `info` function:
+
+```
+info(
+  rate(http_server_request_duration_seconds_count[2m]),
+  {k8s_cluster_name=~".+"}
+)
+```
+
+The common case of adding _all_ data labels can be achieved by
+omitting the 2nd argument of the `info` function entirely, simplifying
+the example even more:
+
+```
+info(rate(http_server_request_duration_seconds_count[2m]))
+```
+
+While `info` normally automatically finds all matching info series, it's possible to
+restrict them by providing a `__name__` label matcher, e.g.
+`{__name__="target_info"}`.
+
+### Limitations
+
+In its current iteration, `info` defaults to considering only info series with
+the name `target_info`. It also assumes that the identifying info series labels are
+`instance` and `job`. `info` does support other info series names however, through
+`__name__` label matchers. E.g., one can explicitly say to consider both
+`target_info` and `build_info` as follows:
+`{__name__=~"(target|build)_info"}`. However, the identifying labels always
+have to be `instance` and `job`.
+
+These limitations partially defeat the purpose of the `info` function.
+At the current stage, this is an experiment to find out how useful the approach
+turns out to be in practice. A final version of the `info` function will indeed
+consider all matching info series, with their appropriate identifying labels.
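+
+For example, the following (illustrative) variant of the query above considers
+both `target_info` and `build_info` series when adding data labels, assuming
+both info metrics exist with the identifying labels `instance` and `job`:
+
+```
+# Illustrative; assumes both info metrics are actually ingested.
+info(
+  rate(http_server_request_duration_seconds_count[2m]),
+  {__name__=~"(target|build)_info"}
+)
+```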
 
 ## `irate()`
 
 `irate(v range-vector)` calculates the per-second instant rate of increase of
 the time series in the range vector. This is based on the last two data points.
 Breaks in monotonicity (such as counter resets due to target restarts) are
-automatically adjusted for.
+automatically adjusted for. Both samples must be either float samples or
+histogram samples. Elements in `v` where one of the last two samples is a float
+sample and the other is a histogram sample will be omitted from the result
+vector, flagged by a warn-level annotation.
+
+`irate` should only be used with counters (for both floats and histograms).
 
 The following example expression returns the per-second rate of HTTP requests
 looking up to 5 minutes back for the two most recent data points, per time
@@ -475,7 +632,7 @@ label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1",
 
 ## `label_replace()`
 
 For each timeseries in `v`, `label_replace(v instant-vector, dst_label string, replacement string, src_label string, regex string)`
-matches the [regular expression](https://github.com/google/re2/wiki/Syntax) `regex` against the value of the label `src_label`. If it
+matches the [regular expression](./basics.md#regular-expressions) `regex` against the value of the label `src_label`. If it
 matches, the value of the label `dst_label` in the returned timeseries will be the expansion of `replacement`, together
 with the original labels in the input. Capturing groups in the regular expression can be referenced with `$1`, `$2`, etc.
 Named capturing groups in the regular expression can be referenced with `$name` (where `name` is the capturing group name). If the regular expression doesn't match then the timeseries is returned unchanged.
@@ -495,8 +652,8 @@ label_replace(up{job="api-server",service="a:c"}, "foo", "$name", "service", "(?
 
 ## `ln()`
 
-`ln(v instant-vector)` calculates the natural logarithm for all elements in `v`.
-Special cases are:
+`ln(v instant-vector)` calculates the natural logarithm for all float samples
+in `v`. Histogram samples in the input vector are ignored silently. Special cases are:
 
 * `ln(+Inf) = +Inf`
 * `ln(0) = -Inf`
@@ -505,35 +662,45 @@ Special cases are:
 
 ## `log2()`
 
-`log2(v instant-vector)` calculates the binary logarithm for all elements in `v`.
-The special cases are equivalent to those in `ln`.
+`log2(v instant-vector)` calculates the binary logarithm for all float samples
+in `v`. Histogram samples in the input vector are ignored silently. The special cases
+are equivalent to those in `ln`.
 
 ## `log10()`
 
-`log10(v instant-vector)` calculates the decimal logarithm for all elements in `v`.
-The special cases are equivalent to those in `ln`.
+`log10(v instant-vector)` calculates the decimal logarithm for all float
+samples in `v`. Histogram samples in the input vector are ignored silently. The special
+cases are equivalent to those in `ln`.
 
 ## `minute()`
 
-`minute(v=vector(time()) instant-vector)` returns the minute of the hour for each
-of the given times in UTC. Returned values are from 0 to 59.
+`minute(v=vector(time()) instant-vector)` interprets float samples in `v` as
+timestamps (number of seconds since January 1, 1970 UTC) and returns the minute
+of the hour (in UTC) for each of those timestamps. Returned values are from 0
+to 59. Histogram samples in the input vector are ignored silently.
 
 ## `month()`
 
-`month(v=vector(time()) instant-vector)` returns the month of the year for each
-of the given times in UTC. Returned values are from 1 to 12, where 1 means
-January etc.
+`month(v=vector(time()) instant-vector)` interprets float samples in `v` as
+timestamps (number of seconds since January 1, 1970 UTC) and returns the month
+of the year (in UTC) for each of those timestamps. Returned values are from 1
+to 12, where 1 means January etc. Histogram samples in the input vector are
+ignored silently.
 
 ## `predict_linear()`
 
 `predict_linear(v range-vector, t scalar)` predicts the value of time series
 `t` seconds from now, based on the range vector `v`, using [simple linear
-regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
-The range vector must have at least two samples in order to perform the
-calculation. When `+Inf` or `-Inf` are found in the range vector,
-the slope and offset value calculated will be `NaN`.
+regression](https://en.wikipedia.org/wiki/Simple_linear_regression). The range
+vector must have at least two float samples in order to perform the
+calculation. When `+Inf` or `-Inf` are found in the range vector, the predicted
+value will be `NaN`.
 
-`predict_linear` should only be used with gauges.
+`predict_linear` should only be used with gauges and only works for float
+samples. Elements in the range vector that contain only histogram samples are
+ignored entirely.
For elements that contain a mix of float and histogram +samples, only the float samples are used as input, which is flagged by an +info-level annotation. ## `rate()` @@ -543,7 +710,7 @@ resets due to target restarts) are automatically adjusted for. Also, the calculation extrapolates to the ends of the time range, allowing for missed scrapes or imperfect alignment of scrape cycles with the range's time period. -The following example expression returns the per-second rate of HTTP requests as measured +The following example expression returns the per-second average rate of HTTP requests over the last 5 minutes, per time series in the range vector: ``` @@ -552,13 +719,13 @@ rate(http_requests_total{job="api-server"}[5m]) `rate` acts on native histograms by calculating a new histogram where each component (sum and count of observations, buckets) is the rate of increase -between the respective component in the first and last native histogram in -`v`. However, each element in `v` that contains a mix of float and native -histogram samples within the range, will be missing from the result vector. +between the respective component in the first and last native histogram in `v`. +However, each element in `v` that contains a mix of float and native histogram +samples within the range, will be omitted from the result vector, flagged by a +warn-level annotation. -`rate` should only be used with counters and native histograms where the -components behave like counters. It is best suited for alerting, and for -graphing of slow-moving counters. +`rate` should only be used with counters (for both floats and histograms). It +is best suited for alerting, and for graphing of slow-moving counters. Note that when combining `rate()` with an aggregation operator (e.g. `sum()`) or a function aggregating over time (any function ending in `_over_time`), @@ -573,17 +740,15 @@ decrease in the value between two consecutive float samples is interpreted as a counter reset. A reset in a native histogram is detected in a more complex way: Any decrease in any bucket, including the zero bucket, or in the count of observation constitutes a counter reset, but also the disappearance of any -previously populated bucket, an increase in bucket resolution, or a decrease of -the zero-bucket width. +previously populated bucket, a decrease of the zero-bucket width, or any schema +change that is not a compatible decrease of resolution. -`resets` should only be used with counters and counter-like native -histograms. +`resets` should only be used with counters (for both floats and histograms). -If the range vector contains a mix of float and histogram samples for the same -series, counter resets are detected separately and their numbers added up. The -change from a float to a histogram sample is _not_ considered a counter -reset. Each float sample is compared to the next float sample, and each -histogram is comprared to the next histogram. +A float sample followed by a histogram sample, or vice versa, counts as a +reset. A counter histogram sample followed by a gauge histogram sample, or vice +versa, also counts as a reset (but note that `resets` should not be used on +gauges in the first place, see above). ## `round()` @@ -591,53 +756,63 @@ histogram is comprared to the next histogram. elements in `v` to the nearest integer. Ties are resolved by rounding up. The optional `to_nearest` argument allows specifying the nearest multiple to which the sample values should be rounded. This multiple may also be a fraction. 
+Histogram samples in the input vector are ignored silently.
 
 ## `scalar()`
 
-Given a single-element input vector, `scalar(v instant-vector)` returns the
-sample value of that single element as a scalar. If the input vector does not
-have exactly one element, `scalar` will return `NaN`.
+Given an input vector that contains only one element with a float sample,
+`scalar(v instant-vector)` returns the sample value of that float sample as a
+scalar. If the input vector does not have exactly one element with a float
+sample, `scalar` will return `NaN`. Histogram samples in the input vector are
+ignored silently.
 
 ## `sgn()`
 
-`sgn(v instant-vector)` returns a vector with all sample values converted to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0 if v is equal to zero.
+`sgn(v instant-vector)` returns a vector with all float sample values converted
+to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0
+if v is equal to zero. Histogram samples in the input vector are ignored silently.
 
 ## `sort()`
 
-`sort(v instant-vector)` returns vector elements sorted by their sample values,
-in ascending order. Native histograms are sorted by their sum of observations.
+`sort(v instant-vector)` returns vector elements sorted by their float sample
+values, in ascending order. Histogram samples in the input vector are ignored silently.
 
-Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering.
+Please note that `sort` only affects the results of instant queries, as range
+query results always have a fixed output ordering.
 
 ## `sort_desc()`
 
 Same as `sort`, but sorts in descending order.
 
-Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering.
-
 ## `sort_by_label()`
 
-**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature
+flag](../feature_flags.md#experimental-promql-functions)
+`--enable-feature=promql-experimental-functions`.**
 
-`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order.
+`sort_by_label(v instant-vector, label string, ...)` returns vector elements
+sorted by the values of the given labels in ascending order. In case these
+label values are equal, elements are sorted by their full label sets.
+`sort_by_label` acts on float and histogram samples in the same way.
 
-Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering.
+Please note that `sort_by_label` only affects the results of instant queries, as
+range query results always have a fixed output ordering.
 
-This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order).
+`sort_by_label` uses [natural sort
+order](https://en.wikipedia.org/wiki/Natural_sort_order).
 
 ## `sort_by_label_desc()`
 
-**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature
+flag](../feature_flags.md#experimental-promql-functions)
+`--enable-feature=promql-experimental-functions`.**
 
 Same as `sort_by_label`, but sorts in descending order.
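+
+For example, the following expression sorts the series of the (illustrative)
+metric `up` by their `instance` label in descending order, using the `job`
+label as a tie-breaker:
+
+```
+# up is used here purely as an illustrative metric.
+sort_by_label_desc(up, "instance", "job")
+```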
-Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. - -This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order). - ## `sqrt()` -`sqrt(v instant-vector)` calculates the square root of all elements in `v`. +`sqrt(v instant-vector)` calculates the square root of all float samples in +`v`. Histogram samples in the input vector are ignored silently. ## `time()` @@ -648,66 +823,86 @@ expression is to be evaluated. ## `timestamp()` `timestamp(v instant-vector)` returns the timestamp of each of the samples of -the given vector as the number of seconds since January 1, 1970 UTC. It also -works with histogram samples. +the given vector as the number of seconds since January 1, 1970 UTC. It acts on +float and histogram samples in the same way. ## `vector()` -`vector(s scalar)` returns the scalar `s` as a vector with no labels. +`vector(s scalar)` converts the scalar `s` to a float sample and returns it as +a single-element instant vector with no labels. ## `year()` -`year(v=vector(time()) instant-vector)` returns the year -for each of the given times in UTC. +`year(v=vector(time()) instant-vector)` returns the year for each of the given +times in UTC. Histogram samples in the input vector are ignored silently. ## `_over_time()` The following functions allow aggregating each series of a given range vector over time and return an instant vector with per-series aggregation results: -* `avg_over_time(range-vector)`: the average value of all points in the specified interval. -* `min_over_time(range-vector)`: the minimum value of all points in the specified interval. -* `max_over_time(range-vector)`: the maximum value of all points in the specified interval. -* `sum_over_time(range-vector)`: the sum of all values in the specified interval. -* `count_over_time(range-vector)`: the count of all values in the specified interval. -* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval. -* `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval. -* `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval. -* `last_over_time(range-vector)`: the most recent point value in the specified interval. +* `avg_over_time(range-vector)`: the average value of all float or histogram samples in the specified interval (see details below). +* `min_over_time(range-vector)`: the minimum value of all float samples in the specified interval. +* `max_over_time(range-vector)`: the maximum value of all float samples in the specified interval. +* `sum_over_time(range-vector)`: the sum of all float or histogram samples in the specified interval (see details below). +* `count_over_time(range-vector)`: the count of all samples in the specified interval. +* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the specified interval. +* `stddev_over_time(range-vector)`: the population standard deviation of all float samples in the specified interval. +* `stdvar_over_time(range-vector)`: the population standard variance of all float samples in the specified interval. +* `last_over_time(range-vector)`: the most recent sample in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. 
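For example (assuming the standard Go client metric `go_goroutines` is present), the following returns the per-series average over the last 10 minutes:

```
avg_over_time(go_goroutines[10m])
```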
-If the [feature flag](../feature_flags/) +If the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions` is set, the following additional functions are available: -* `mad_over_time(range-vector)`: the median absolute deviation of all points in the specified interval. +* `mad_over_time(range-vector)`: the median absolute deviation of all float + samples in the specified interval. +* `ts_of_min_over_time(range-vector)`: the timestamp of the last float sample + that has the minimum value of all float samples in the specified interval. +* `ts_of_max_over_time(range-vector)`: the timestamp of the last float sample + that has the maximum value of all float samples in the specified interval. +* `ts_of_last_over_time(range-vector)`: the timestamp of the last sample in the + specified interval. Note that all values in the specified interval have the same weight in the aggregation even if the values are not equally spaced throughout the interval. -`avg_over_time`, `sum_over_time`, `count_over_time`, `last_over_time`, and -`present_over_time` handle native histograms as expected. All other functions -ignore histogram samples. +These functions act on histograms in the following way: + +- `count_over_time`, `last_over_time`, and `present_over_time` act on float + and histogram samples in the same way. +- `avg_over_time()` and `sum_over_time()` act on histogram samples in a way + that corresponds to the respective aggregation operators. If a series + contains a mix of float samples and histogram samples within the range, the + corresponding result is removed entirely from the output vector. Such a + removal is flagged by a warn-level annotation. +- All other functions ignore histogram samples in the following way: Input + ranges containing only histogram samples are silently removed from the + output. For ranges with a mix of histogram and float samples, only the float + samples are processed and the omission of the histogram samples is flagged by + an info-level annotation. ## Trigonometric Functions -The trigonometric functions work in radians: +The trigonometric functions work in radians. They ignore histogram samples in +the input vector. -* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)). -* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)). -* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)). -* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)). -* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)). -* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)). -* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)). -* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)). -* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)). -* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
-* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)). -* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)). +* `acos(v instant-vector)`: calculates the arccosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Acos)). +* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Acosh)). +* `asin(v instant-vector)`: calculates the arcsine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Asin)). +* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Asinh)). +* `atan(v instant-vector)`: calculates the arctangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Atan)). +* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Atanh)). +* `cos(v instant-vector)`: calculates the cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Cos)). +* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Cosh)). +* `sin(v instant-vector)`: calculates the sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Sin)). +* `sinh(v instant-vector)`: calculates the hyperbolic sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Sinh)). +* `tan(v instant-vector)`: calculates the tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Tan)). +* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Tanh)). The following are useful for converting between degrees and radians: -* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`. +* `deg(v instant-vector)`: converts radians to degrees for all float samples in `v`. * `pi()`: returns pi. -* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`. +* `rad(v instant-vector)`: converts degrees to radians for all float samples in `v`. diff --git a/docs/querying/operators.md b/docs/querying/operators.md index f5f217ff63..f8bddc8bda 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -3,8 +3,6 @@ title: Operators sort_rank: 2 --- -# Operators - ## Binary operators Prometheus's query language supports basic logical and arithmetic operators. @@ -23,22 +21,62 @@ The following binary arithmetic operators exist in Prometheus: * `^` (power/exponentiation) Binary arithmetic operators are defined between scalar/scalar, vector/scalar, -and vector/vector value pairs. +and vector/vector value pairs. They follow the usual [IEEE 754 floating point +arithmetic](https://en.wikipedia.org/wiki/IEEE_754), including the handling of +special values like `NaN`, `+Inf`, and `-Inf`. -**Between two scalars**, the behavior is obvious: they evaluate to another +**Between two scalars**, the behavior is straightforward: they evaluate to another scalar that is the result of the operator applied to both scalar operands. **Between an instant vector and a scalar**, the operator is applied to the -value of every data sample in the vector. E.g. 
if a time series instant vector -is multiplied by 2, the result is another vector in which every sample value of -the original vector is multiplied by 2. The metric name is dropped. +value of every data sample in the vector. + +If the data sample is a float, the operation is performed between that float and the scalar. +For example, if an instant vector of float samples is multiplied by 2, +the result is another vector of float samples in which every sample value of +the original vector is multiplied by 2. + +For vector elements that are histogram samples, the behavior is the +following: + +* For `*`, all bucket populations and the count and the sum of observations + are multiplied by the scalar. + +* For `/`, the histogram sample has to be on the left-hand side (LHS), followed + by the scalar on the right-hand side (RHS). All bucket populations and the count + and the sum of observations are then divided by the scalar. A division by zero + results in a histogram with no regular buckets and the zero bucket population + and the count and sum of observations all set to `+Inf`, `-Inf`, or `NaN`, depending + on their values in the input histogram (positive, negative, or zero/`NaN`, respectively). + +* For `/` with a scalar on the LHS and a histogram sample on the RHS, and similarly for all + other arithmetic binary operators in any combination of a scalar and a + histogram sample, there is no result and the corresponding element is removed + from the resulting vector. Such a removal is flagged by an info-level + annotation. **Between two instant vectors**, a binary arithmetic operator is applied to -each entry in the left-hand side vector and its [matching element](#vector-matching) -in the right-hand vector. The result is propagated into the result vector with the -grouping labels becoming the output label set. The metric name is dropped. Entries -for which no matching entry in the right-hand vector can be found are not part of -the result. +each entry in the LHS vector and its [matching element](#vector-matching) in +the RHS vector. The result is propagated into the result vector with the +grouping labels becoming the output label set. Entries for which no matching +entry in the right-hand vector can be found are not part of the result. + +If two float samples are matched, the arithmetic operator is applied to the two input values. + +If a float sample is matched with a histogram sample, the behavior follows the same +logic as between a scalar and a histogram sample (see above), i.e. `*` and `/` +(the latter with the histogram sample on the LHS) are valid operations, while all +others lead to the removal of the corresponding element from the resulting vector. + +If two histogram samples are matched, only `+` and `-` are valid operations, each +adding or subtracting all matching bucket populations and the count and the +sum of observations. All other operations result in the removal of the +corresponding element from the output vector, flagged by an info-level +annotation. + +**In any arithmetic binary operation involving vectors**, the metric name is +dropped. This occurs even if `__name__` is explicitly mentioned in `on` +(see https://github.com/prometheus/prometheus/issues/16631 for further discussion).
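As a sketch of the valid and invalid scalar combinations described above (assuming `http_request_duration_seconds` is a native histogram metric):

```
# Valid: all bucket populations, the count, and the sum of observations are multiplied by 2.
http_request_duration_seconds * 2

# Valid: histogram sample on the LHS, scalar on the RHS.
http_request_duration_seconds / 2

# Invalid: scalar divided by a histogram; the element is removed from the
# result, flagged by an info-level annotation.
2 / http_request_duration_seconds
```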
### Trigonometric binary operators @@ -46,9 +84,12 @@ The following trigonometric binary operators, which work in radians, exist in Pr * `atan2` (based on https://pkg.go.dev/math#Atan2) -Trigonometric operators allow trigonometric functions to be executed on two vectors using -vector matching, which isn't available with normal functions. They act in the same manner -as arithmetic operators. +Trigonometric operators allow trigonometric functions to be executed on two +vectors using vector matching, which isn't available with normal functions. +They act in the same manner as arithmetic operators. They only operate on float +samples. Operations involving histogram samples result in the removal of the +corresponding vector elements from the output vector, flagged by an +info-level annotation. ### Comparison binary operators @@ -72,20 +113,42 @@ operators result in another scalar that is either `0` (`false`) or `1` **Between an instant vector and a scalar**, these operators are applied to the value of every data sample in the vector, and vector elements between which the -comparison result is `false` get dropped from the result vector. If the `bool` -modifier is provided, vector elements that would be dropped instead have the value -`0` and vector elements that would be kept have the value `1`. The metric name -is dropped if the `bool` modifier is provided. +comparison result is `false` get dropped from the result vector. These +operations only work with float samples in the vector. For histogram samples, +the corresponding element is removed from the result vector, flagged by an +info-level annotation. **Between two instant vectors**, these operators behave as a filter by default, applied to matching entries. Vector elements for which the expression is not true or which do not find a match on the other side of the expression get dropped from the result, while the others are propagated into a result vector -with the grouping labels becoming the output label set. -If the `bool` modifier is provided, vector elements that would have been -dropped instead have the value `0` and vector elements that would be kept have -the value `1`, with the grouping labels again becoming the output label set. -The metric name is dropped if the `bool` modifier is provided. +with the grouping labels becoming the output label set. + +Matches between two float samples work as usual. + +Matches between a float sample and a histogram sample are invalid, and the +corresponding element is removed from the result vector, flagged by an info-level +annotation. + +Between two histogram samples, `==` and `!=` work as expected, but all other +comparison binary operations are again invalid. + +**In any comparison binary operation involving vectors**, providing the `bool` +modifier changes the behavior in the following ways: + +* Vector elements which find a match on the other side of the expression but for + which the expression is false instead have the value `0`, and vector elements + that do find a match and for which the expression is true have the value `1`. + (Note that elements with no match or invalid operations involving histogram + samples still return no result rather than the value `0`.) +* The metric name is dropped. + +If the `bool` modifier is not provided, then the metric name from the left side +is retained, with some exceptions: + +* If `on` is used, then the metric name is dropped. +* If `group_right` is used, then the metric name from the right side is retained, + to avoid collisions.
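To illustrate the `bool` modifier (reusing the `http_requests_total` metric from the examples in these docs), the following keeps every matched series and returns `1` where the value exceeds 100 and `0` where it does not, instead of filtering:

```
http_requests_total > bool 100
```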
### Logical/set binary operators @@ -108,6 +171,9 @@ which do not have matching label sets in `vector1`. `vector1` for which there are no elements in `vector2` with exactly matching label sets. All matching elements in both vectors are dropped. +As these logical/set binary operators do not interact with the sample values, +they work in the same way for float samples and histogram samples. + ## Vector matching Operations between vectors attempt to find a matching element in the right-hand side @@ -219,19 +285,20 @@ used to aggregate the elements of a single instant vector, resulting in a new vector of fewer elements with aggregated values: * `sum` (calculate sum over dimensions) +* `avg` (calculate the arithmetic average over dimensions) * `min` (select minimum over dimensions) * `max` (select maximum over dimensions) -* `avg` (calculate the average over dimensions) +* `bottomk` (smallest _k_ elements by sample value) +* `topk` (largest _k_ elements by sample value) +* `limitk` (sample _k_ elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`) +* `limit_ratio` (sample a pseudo-random ratio _r_ of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`) * `group` (all values in the resulting vector are 1) -* `stddev` (calculate population standard deviation over dimensions) -* `stdvar` (calculate population standard variance over dimensions) * `count` (count number of elements in the vector) * `count_values` (count number of elements with the same value) + +* `stddev` (calculate population standard deviation over dimensions) +* `stdvar` (calculate population standard variance over dimensions) * `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions) -* `limitk` (sample n elements) -* `limit_ratio` (sample elements with approximately 𝑟 ratio if `𝑟 > 0`, and the complement of such samples if `𝑟 = -(1.0 - 𝑟)`) These operators can either be used to aggregate over **all** label dimensions or preserve distinct dimensions by including a `without` or `by` clause. These @@ -251,29 +318,74 @@ all other labels are preserved in the output. `by` does the opposite and drops labels that are not listed in the `by` clause, even if their label values are identical between all elements of the vector. -`parameter` is only required for `count_values`, `quantile`, `topk`, -`bottomk`, `limitk` and `limit_ratio`. +`parameter` is only required for `topk`, `bottomk`, `limitk`, `limit_ratio`, +`quantile`, and `count_values`. It is used as the value for _k_, _r_, φ, or the +name of the additional label, respectively. + +### Detailed explanations + +`sum` sums up sample values in the same way as the `+` binary operator does +between two values. Similarly, `avg` divides the sum by the number of +aggregated samples in the same way as the `/` binary operator. Therefore, all +sample values aggregated into a single resulting vector element must either be +float samples or histogram samples. An aggregation of a mix of both is invalid, +resulting in the removal of the corresponding vector element from the output +vector, flagged by a warn-level annotation. + +`min` and `max` only operate on float samples, following IEEE 754 floating +point arithmetic, which in particular implies that `NaN` is only ever +considered a minimum or maximum if all aggregated values are `NaN`.
Histogram +samples in the input vector are ignored, flagged by an info-level annotation. + +`topk` and `bottomk` are different from other aggregators in that a subset of +the input samples, including the original labels, is returned in the result +vector. `by` and `without` are only used to bucket the input vector. Similar to +`min` and `max`, they only operate on float samples, considering `NaN` values +to be farthest from the top or bottom, respectively. Histogram samples in the +input vector are ignored, flagged by an info-level annotation. + +If used in an instant query, `topk` and `bottomk` return series ordered by +value in descending or ascending order, respectively. If used with `by` or +`without`, then series within each bucket are sorted by value, and series in +the same bucket are returned consecutively, but there is no guarantee that +buckets of series will be returned in any particular order. No sorting applies +to range queries. + +`limitk` and `limit_ratio` also return a subset of the input samples, including +the original labels in the result vector. The subset is selected in a +deterministic pseudo-random way. `limitk` picks _k_ samples, while +`limit_ratio` picks a ratio _r_ of samples (each determined by `parameter`). +This happens independently of the sample type. Therefore, it works for both float +samples and histogram samples. _r_ can be between +1 and -1. The absolute value +of _r_ is used as the selection ratio, but the selection order is inverted for +a negative _r_, which can be used to select complements. For example, +`limit_ratio(0.1, ...)` returns a deterministic set of approximately 10% of +the input samples, while `limit_ratio(-0.9, ...)` returns precisely the +remaining approximately 90% of the input samples not returned by +`limit_ratio(0.1, ...)`. + +`group` and `count` do not interact with the sample values, so they work in +the same way for float samples and histogram samples. `count_values` outputs one time series per unique sample value. Each series has an additional label. The name of that label is given by the aggregation parameter, and the label value is the unique sample value. The value of each time series is the number of times that sample value was present. +`count_values` works with both float samples and histogram samples. For the +latter, a compact string representation of the histogram sample value is used +as the label value. -`topk` and `bottomk` are different from other aggregators in that a subset of -the input samples, including the original labels, are returned in the result -vector. `by` and `without` are only used to bucket the input vector. - -`limitk` and `limit_ratio` also return a subset of the input samples, -including the original labels in the result vector, these are experimental -operators that must be enabled with `--enable-feature=promql-experimental-functions`. +`stddev` and `stdvar` only work with float samples, following IEEE 754 floating +point arithmetic. Histogram samples in the input vector are ignored, flagged by +an info-level annotation. `quantile` calculates the φ-quantile, the value that ranks at number φ*N among the N metric values of the dimensions aggregated over. φ is provided as the aggregation parameter. For example, `quantile(0.5, ...)` calculates the median, -`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. +`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. +For φ < 0, `-Inf` is returned.
For φ > 1, `+Inf` is returned. - -Example: +### Examples If the metric `http_requests_total` had time series that fan out by `application`, `instance`, and `group` labels, we could calculate the total @@ -303,28 +415,6 @@ could write: limitk(10, http_requests_total) -To deterministically sample approximately 10% of timeseries we could write: - - limit_ratio(0.1, http_requests_total) - -Given that `limit_ratio()` implements a deterministic sampling algorithm (based -on labels' hash), you can get the _complement_ of the above samples, i.e. -approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)` -with: - - limit_ratio(-0.9, http_requests_total) - -You can also use this feature to e.g. verify that `avg()` is a representative -aggregation for your samples' values, by checking that the difference between -averaging two samples' subsets is "small" when compared to the standard -deviation. - - abs( - avg(limit_ratio(0.5, http_requests_total)) - - - avg(limit_ratio(-0.5, http_requests_total)) - ) <= bool stddev(http_requests_total) - ## Binary operator precedence The following list shows the precedence of binary operators in Prometheus, from @@ -340,35 +430,3 @@ highest to lowest. Operators on the same precedence level are left-associative. For example, `2 * 3 % 2` is equivalent to `(2 * 3) % 2`. However `^` is right associative, so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`. - -## Operators for native histograms - -Native histograms are an experimental feature. Ingesting native histograms has -to be enabled via a [feature flag](../../feature_flags.md#native-histograms). Once -native histograms have been ingested, they can be queried (even after the -feature flag has been disabled again). However, the operator support for native -histograms is still very limited. - -Logical/set binary operators work as expected even if histogram samples are -involved. They only check for the existence of a vector element and don't -change their behavior depending on the sample type of an element (float or -histogram). The `count` aggregation operator works similarly. - -The binary `+` and `-` operators between two native histograms and the `sum` -and `avg` aggregation operators to aggregate native histograms are fully -supported. Even if the histograms involved have different bucket layouts, the -buckets are automatically converted appropriately so that the operation can be -performed. (With the currently supported bucket schemas, that's always -possible.) If either operator has to aggregate a mix of histogram samples and -float samples, the corresponding vector element is removed from the output -vector entirely. - -The binary `*` operator works between a native histogram and a float in any -order, while the binary `/` operator can be used between a native histogram -and a float in that exact order. - -All other operators (and unmentioned cases for the above operators) do not -behave in a meaningful way. They either treat the histogram sample as if it -were a float sample of value 0, or (in case of arithmetic operations between a -scalar and a vector) they leave the histogram sample unchanged. This behavior -will change to a meaningful one before native histograms are a stable feature. 
diff --git a/docs/querying/remote_read_api.md b/docs/querying/remote_read_api.md index efbd08e984..1930e1fbae 100644 --- a/docs/querying/remote_read_api.md +++ b/docs/querying/remote_read_api.md @@ -3,9 +3,7 @@ title: Remote Read API sort_rank: 7 --- -# Remote Read API - -> This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus. +NOTE: This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus. This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression. The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto). @@ -15,11 +13,12 @@ Request are made to the following endpoint. ``` /api/v1/read ``` -### Samples +## Samples -This returns a message that includes a list of raw samples. +This returns a message that includes a list of raw samples matching the +requested query. -### Streamed Chunks +## Streamed Chunks These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second. diff --git a/docs/stability.md b/docs/stability.md index 1fd2e51e0c..82974f8f70 100644 --- a/docs/stability.md +++ b/docs/stability.md @@ -1,15 +1,14 @@ --- -title: API Stability +title: API stability guarantees +nav_title: API stability sort_rank: 11 --- -# API Stability Guarantees - Prometheus promises API stability within a major version, and strives to avoid breaking changes for key features. Some features, which are cosmetic, still under development, or depend on 3rd party services, are not covered by this. -Things considered stable for 2.x: +Things considered stable for 3.x: * The query language and data model * Alerting and recording rules @@ -18,21 +17,25 @@ Things considered stable for 2.x: * Configuration file format (minus the service discovery remote read/write, see below) * Rule/alert file format * Console template syntax and semantics -* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/). +* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/), and receiving +* Agent mode +* OTLP receiver endpoint -Things considered unstable for 2.x: +Things considered unstable for 3.x: * Any feature listed as experimental or subject to change, including: - * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458) - * Remote write receiving, remote read and the remote read endpoint + * The [`double_exponential_smoothing` PromQL function](https://github.com/prometheus/prometheus/issues/2458) + * Remote read and the remote read endpoint * Server-side HTTPS and basic authentication -* Service discovery integrations, with the exception of `static_configs` and `file_sd_configs` +* Service discovery integrations, with the exception of `static_configs`, `file_sd_configs` and `http_sd_config` * Go APIs of packages that are part of the server * HTML generated by the web UI * The metrics in the /metrics endpoint of Prometheus itself * Exact on-disk format.
Potential changes however, will be forward compatible and transparently handled by Prometheus * The format of the logs +Prometheus 2.x stability guarantees can be found [in the 2.x documentation](https://prometheus.io/docs/prometheus/2.55/stability/). + As long as you are not using any features marked as experimental/unstable, an upgrade within a major version can usually be performed without any operational adjustments and very little risk that anything will break. Any breaking changes diff --git a/docs/storage.md b/docs/storage.md index 55d4309d37..f472cce140 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -3,8 +3,6 @@ title: Storage sort_rank: 5 --- -# Storage - Prometheus includes a local on-disk time series database, but also optionally integrates with remote storage systems. ## Local storage @@ -61,10 +59,10 @@ A Prometheus server's data directory looks something like this: Note that a limitation of local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of drive or node outages and should be managed like any other single node -database. +database. -[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups -made without snapshots run the risk of losing data that was recorded since +[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups +made without snapshots run the risk of losing data that was recorded since the last WAL sync, which typically happens every two hours. With proper architecture, it is possible to retain years of data in local storage. @@ -75,22 +73,21 @@ performance, and efficiency. For further details on file format, see [TSDB format](/tsdb/docs/format/README.md). -## Compaction +### Compaction The initial two-hour blocks are eventually compacted into longer blocks in the background. Compaction will create larger blocks containing data spanning up to 10% of the retention time, or 31 days, whichever is smaller. -## Operational aspects +### Operational aspects Prometheus has several flags that configure local storage. The most important are: - `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. -- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is - set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention` - nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`. - Supported units: y, w, d, h, m, s, ms. +- `--storage.tsdb.retention.time`: How long to retain samples in storage. If neither + this flag nor `storage.tsdb.retention.size` is set, the retention time defaults to + `15d`. Supported units: y, w, d, h, m, s, ms. - `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only @@ -98,7 +95,6 @@ Prometheus has several flags that configure local storage. The most important ar chunks are counted in the total size. So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours). -- `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`. - `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). 
Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. @@ -117,13 +113,12 @@ time series you scrape (fewer targets or fewer series per target), or you can increase the scrape interval. However, reducing the number of series is likely more effective, due to compression of samples within a series. -If your local storage becomes corrupted for whatever reason, the best -strategy to address the problem is to shut down Prometheus then remove the -entire storage directory. You can also try removing individual block directories, -or the WAL directory to resolve the problem. Note that this means losing -approximately two hours data per block directory. Again, Prometheus's local -storage is not intended to be durable long-term storage; external solutions -offer extended retention and data durability. +If your local storage becomes corrupted to the point where Prometheus will not +start, it is recommended to back up the storage directory and restore the +corrupted block directories from your backups. If you do not have backups, the +last resort is to remove the corrupted files. For example, you can try removing +individual block directories or the write-ahead log (WAL) files. Note that this +means losing the data for the time ranges those blocks or the WAL cover. CAUTION: Non-POSIX compliant filesystems are not supported for Prometheus' local storage as unrecoverable corruptions may happen. NFS filesystems @@ -137,16 +132,16 @@ will be used. Expired block cleanup happens in the background. It may take up to two hours to remove expired blocks. Blocks must be fully expired before they are removed. -## Right-Sizing Retention Size +### Right-Sizing Retention Size -If you are utilizing `storage.tsdb.retention.size` to set a size limit, you -will want to consider the right size for this value relative to the storage you -have allocated for Prometheus. It is wise to reduce the retention size to provide -a buffer, ensuring that older entries will be removed before the allocated storage +If you are utilizing `storage.tsdb.retention.size` to set a size limit, you +will want to consider the right size for this value relative to the storage you +have allocated for Prometheus. It is wise to reduce the retention size to provide +a buffer, ensuring that older entries will be removed before the allocated storage for Prometheus becomes full. -At present, we recommend setting the retention size to, at most, 80-85% of your -allocated Prometheus disk space. This increases the likelihood that older entires +At present, we recommend setting the retention size to, at most, 80-85% of your +allocated Prometheus disk space. This increases the likelihood that older entries will be removed prior to hitting any disk limitations. ## Remote storage integrations @@ -157,31 +152,27 @@ a set of interfaces that allow integrating with remote storage systems. ### Overview -Prometheus integrates with remote storage systems in three ways: +Prometheus integrates with remote storage systems in four ways: -- Prometheus can write samples that it ingests to a remote URL in a standardized format. -- Prometheus can receive samples from other Prometheus servers in a standardized format. -- Prometheus can read (back) sample data from a remote URL in a standardized format. +- Prometheus can write samples that it ingests to a remote URL in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+- Prometheus can receive samples from other clients in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/). +- Prometheus can read (back) sample data from a remote URL in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31). +- Prometheus can return sample data requested by clients in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31). ![Remote read and write architecture](images/remote_integrations.png) -The read and write protocols both use a snappy-compressed protocol buffer encoding over -HTTP. The protocols are not considered as stable APIs yet and may change to use gRPC -over HTTP/2 in the future, when all hops between Prometheus and the remote storage can -safely be assumed to support HTTP/2. +The remote read and write protocols both use a snappy-compressed protocol buffer encoding over +HTTP. The read protocol is not yet considered a stable API. -For details on configuring remote storage integrations in Prometheus, see the +The write protocol has a [stable specification for the 1.0 version](https://prometheus.io/docs/specs/remote_write_spec/) +and an [experimental specification for the 2.0 version](https://prometheus.io/docs/specs/remote_write_spec_2_0/), +both supported by the Prometheus server. + +For details on configuring remote storage integrations in Prometheus as a client, see the [remote write](configuration/configuration.md#remote_write) and [remote read](configuration/configuration.md#remote_read) sections of the Prometheus configuration documentation. -The built-in remote write receiver can be enabled by setting the -`--web.enable-remote-write-receiver` command line flag. When enabled, -the remote write receiver endpoint is `/api/v1/write`. - -For details on the request and response messages, see the -[remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto). - Note that on the read path, Prometheus only fetches raw series data for a set of label selectors and time ranges from the remote end. All PromQL evaluation on the raw data still happens in Prometheus itself. This means that remote read queries @@ -189,6 +180,11 @@ have some scalability limit, since all necessary data needs to be loaded into th querying Prometheus server first and then processed there. However, supporting fully distributed evaluation of PromQL was deemed infeasible for the time being. +Prometheus also serves both protocols. The built-in remote write receiver can be enabled +by setting the `--web.enable-remote-write-receiver` command line flag. When enabled, +the remote write receiver endpoint is `/api/v1/write`. The remote read endpoint is +available on [`/api/v1/read`](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/).
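As a minimal client-side sketch (the URL is a placeholder, not a real endpoint), both directions are configured in `prometheus.yml` like this:

```yaml
remote_write:
  - url: "https://remote-storage.example.com/api/v1/write"

remote_read:
  - url: "https://remote-storage.example.com/api/v1/read"
```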
+ ### Existing integrations To learn more about existing integrations with remote storage systems, see the diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index 8ccbafe6f1..128132a8d2 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -18,6 +18,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "os" @@ -26,10 +27,9 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" prom_discovery "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -41,7 +41,7 @@ var ( a = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.") outputFile = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String() listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String() - logger log.Logger + logger *slog.Logger // addressLabel is the name for the label containing a target's address. addressLabel = model.MetaLabelPrefix + "consul_address" @@ -90,7 +90,7 @@ type discovery struct { address string refreshInterval int tagSeparator string - logger log.Logger + logger *slog.Logger oldSourceList map[string]bool } @@ -164,7 +164,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { var srvs map[string][]string resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) if err != nil { - level.Error(d.logger).Log("msg", "Error getting services list", "err", err) + d.logger.Error("Error getting services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -173,7 +173,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { io.Copy(io.Discard, resp.Body) resp.Body.Close() if err != nil { - level.Error(d.logger).Log("msg", "Error reading services list", "err", err) + d.logger.Error("Error reading services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -181,7 +181,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { err = json.Unmarshal(b, &srvs) resp.Body.Close() if err != nil { - level.Error(d.logger).Log("msg", "Error parsing services list", "err", err) + d.logger.Error("Error parsing services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) continue } @@ -200,13 +200,13 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/service/%s", d.address, name)) if err != nil { - level.Error(d.logger).Log("msg", "Error getting services nodes", "service", name, "err", err) + d.logger.Error("Error getting services nodes", "service", name, "err", err) break } tg, err := d.parseServiceNodes(resp, name) if err != nil { - level.Error(d.logger).Log("msg", "Error parsing services nodes", "service", name, "err", err) + d.logger.Error("Error parsing services nodes", "service", name, "err", err) break } tgs = append(tgs, tg) @@ -254,8 +254,7 @@ func main() { fmt.Println("err: ", err) return } - logger = log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout)) - logger = 
log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + logger = promslog.New(&promslog.Config{}) ctx := context.Background() @@ -272,7 +271,7 @@ func main() { } if err != nil { - level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err) + logger.Error("failed to create discovery metrics", "err", err) os.Exit(1) } @@ -280,7 +279,7 @@ func main() { refreshMetrics := prom_discovery.NewRefreshMetrics(reg) metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics) if err != nil { - level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) + logger.Error("failed to register service discovery metrics", "err", err) os.Exit(1) } diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go index dcf5a2b78c..b242c4eaa0 100644 --- a/documentation/examples/custom-sd/adapter/adapter.go +++ b/documentation/examples/custom-sd/adapter/adapter.go @@ -18,13 +18,12 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "os" "path/filepath" "reflect" "sort" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -55,7 +54,7 @@ type Adapter struct { manager *discovery.Manager output string name string - logger log.Logger + logger *slog.Logger } func mapToArray(m map[string]*customSD) []customSD { @@ -106,7 +105,7 @@ func (a *Adapter) refreshTargetGroups(allTargetGroups map[string][]*targetgroup. a.groups = tempGroups err := a.writeOutput() if err != nil { - level.Error(log.With(a.logger, "component", "sd-adapter")).Log("err", err) + a.logger.With("component", "sd-adapter").Error("failed to write output", "err", err) } } } @@ -163,7 +162,7 @@ func (a *Adapter) Run() { } // NewAdapter creates a new instance of Adapter. -func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter { +func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger *slog.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter { return &Adapter{ ctx: ctx, disc: d, diff --git a/documentation/examples/prometheus-otlp.yml b/documentation/examples/prometheus-otlp.yml new file mode 100644 index 0000000000..07b1fa54b1 --- /dev/null +++ b/documentation/examples/prometheus-otlp.yml @@ -0,0 +1,31 @@ +# my global config +global: + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + +otlp: + # Recommended attributes to be promoted to labels. + promote_resource_attributes: + - service.instance.id + - service.name + - service.namespace + - cloud.availability_zone + - cloud.region + - container.name + - deployment.environment.name + - k8s.cluster.name + - k8s.container.name + - k8s.cronjob.name + - k8s.daemonset.name + - k8s.deployment.name + - k8s.job.name + - k8s.namespace.name + - k8s.pod.name + - k8s.replicaset.name + - k8s.statefulset.name + # Ingest OTLP data keeping all characters in metric/label names. + translation_strategy: NoUTF8EscapingWithSuffixes + +storage: + # OTLP is a push-based protocol; out-of-order samples are a common scenario.
+ tsdb: + out_of_order_time_window: 30m diff --git a/documentation/examples/prometheus-ovhcloud.yml b/documentation/examples/prometheus-ovhcloud.yml index 21facad1ca..b2cc60af25 100644 --- a/documentation/examples/prometheus-ovhcloud.yml +++ b/documentation/examples/prometheus-ovhcloud.yml @@ -1,4 +1,4 @@ -# An example scrape configuration for running Prometheus with Ovhcloud. +# An example scrape configuration for running Prometheus with OVHcloud. scrape_configs: - job_name: 'ovhcloud' ovhcloud_sd_configs: diff --git a/documentation/examples/prometheus-stackit.yml b/documentation/examples/prometheus-stackit.yml new file mode 100644 index 0000000000..623cb231ff --- /dev/null +++ b/documentation/examples/prometheus-stackit.yml @@ -0,0 +1,34 @@ +# An example scrape configuration for running Prometheus with +# STACKIT. + +scrape_configs: + # Make Prometheus scrape itself for metrics. + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] + + # Discover Node Exporter instances to scrape. + - job_name: "node" + + stackit_sd_configs: + - project: 11111111-1111-1111-1111-111111111111 + authorization: + credentials: "" + relabel_configs: + # Use the public IPv4 and port 9100 to scrape the target. + - source_labels: [__meta_stackit_public_ipv4] + target_label: __address__ + replacement: "$1:9100" + + # Discover Node Exporter instances to scrape using a STACKIT Subnet called mynet. + - job_name: "node_private" + + stackit_sd_configs: + - project: 11111111-1111-1111-1111-111111111111 + authorization: + credentials: "" + relabel_configs: + # Use the private IPv4 within the STACKIT Subnet and port 9100 to scrape the target. + - source_labels: [__meta_stackit_private_ipv4_mynet] + target_label: __address__ + replacement: "$1:9100" diff --git a/documentation/examples/prometheus.yml b/documentation/examples/prometheus.yml index 312b578aad..9a45049c62 100644 --- a/documentation/examples/prometheus.yml +++ b/documentation/examples/prometheus.yml @@ -27,3 +27,6 @@ scrape_configs: static_configs: - targets: ["localhost:9090"] + # The label name is added as a label `label_name=<label_value>` to any timeseries scraped from this config.
+ labels: + app: "prometheus" diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md index 739cf3be36..968d2b25cb 100644 --- a/documentation/examples/remote_storage/example_write_adapter/README.md +++ b/documentation/examples/remote_storage/example_write_adapter/README.md @@ -19,7 +19,7 @@ remote_write: protobuf_message: "io.prometheus.write.v2.Request" ``` -or for deprecated Remote Write 1.0 message: +or for the eventually deprecated Remote Write 1.0 message: ```yaml remote_write: diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 348b27dfc7..aa95d2faa8 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,76 +1,79 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.21.0 +go 1.23.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 - github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 - github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.5 - github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.55.0 - github.com/prometheus/prometheus v0.52.1 - github.com/stretchr/testify v1.9.0 + github.com/golang/snappy v1.0.0 + github.com/influxdata/influxdb-client-go/v2 v2.14.0 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/common v0.65.0 + github.com/prometheus/prometheus v1.99.0 + github.com/stretchr/testify v1.10.0 ) require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aws/aws-sdk-go v1.53.16 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/oapi-codegen/runtime v1.0.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/collector/pdata v1.8.0 // indirect - go.opentelemetry.io/collector/semconv v0.101.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/pdata v1.31.0 // indirect + go.opentelemetry.io/collector/semconv v0.125.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/crypto v0.38.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.25.0 // indirect + golang.org/x/time v0.7.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect + google.golang.org/grpc v1.72.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apimachinery v0.29.3 // indirect - k8s.io/client-go v0.29.3 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/apimachinery v0.32.3 // indirect + k8s.io/client-go v0.32.3 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect ) exclude ( diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 9898d75d70..fd0d676adf 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,9 +1,9 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod 
h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= @@ -14,6 +14,7 @@ github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -23,23 +24,25 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= -github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bmatcuk/doublestar v1.1.1/go.mod 
h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -50,8 +53,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -62,10 +63,11 @@ github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= 
@@ -74,6 +76,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -85,16 +89,16 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -103,8 +107,8 @@ github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -117,8 +121,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -126,8 +130,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -167,10 +171,10 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI= -github.com/influxdata/influxdb v1.11.5/go.mod h1:k8sWREQl1/9t46VrkrH5adUM4UNGIt206ipO3plbkw8= +github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4= +github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod 
h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -186,12 +190,13 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -235,6 +240,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo= +github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -249,6 +256,8 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -256,19 +265,19 @@ 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -279,8 +288,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= 
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -288,6 +297,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -296,26 +306,34 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= -go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= -go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= -go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod 
h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4Isv+BLTBqU= +go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk= +go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s= +go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -326,8 +344,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -347,20 +365,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod 
h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -376,37 +394,37 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= 
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= +google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= +google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -414,13 +432,15 @@ google.golang.org/protobuf 
v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -436,21 +456,21 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api 
v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/documentation/examples/remote_storage/remote_storage_adapter/README.md b/documentation/examples/remote_storage/remote_storage_adapter/README.md index ce0735bff5..5f37c4d9eb 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/README.md +++ b/documentation/examples/remote_storage/remote_storage_adapter/README.md @@ -31,7 +31,7 @@ OpenTSDB example: InfluxDB example: ``` -./remote_storage_adapter --influxdb-url=http://localhost:8086/ --influxdb.database=prometheus --influxdb.retention-policy=autogen +INFLUXDB_AUTH_TOKEN= ./remote_storage_adapter --influxdb-url=http://localhost:8086/ --influxdb.organization= --influxdb.bucket= ``` To show all flags: diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go index 36242a8f4d..b02560dbab 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go @@ -16,19 +16,19 @@ package graphite import ( "bytes" "fmt" + "log/slog" "math" "net" "sort" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" ) // Client allows sending batches of Prometheus samples to Graphite. type Client struct { - logger log.Logger + logger *slog.Logger address string transport string @@ -37,9 +37,9 @@ type Client struct { } // NewClient creates a new Client. 
-func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client { +func NewClient(logger *slog.Logger, address, transport string, timeout time.Duration, prefix string) *Client { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Client{ logger: logger, @@ -93,7 +93,7 @@ func (c *Client) Write(samples model.Samples) error { t := float64(s.Timestamp.UnixNano()) / 1e9 v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send value to Graphite, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send value to Graphite, skipping sample", "value", v, "sample", s) continue } fmt.Fprintf(&buf, "%s %f %f\n", k, v, t) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index e84ed9e129..a91b27360e 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -14,56 +14,61 @@ package influxdb import ( - "encoding/json" + "context" "errors" "fmt" + "log/slog" "math" - "os" "strings" + "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - influx "github.com/influxdata/influxdb/client/v2" + influx "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api/query" + "github.com/influxdata/influxdb-client-go/v2/api/write" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/prompb" ) // Client allows sending batches of Prometheus samples to InfluxDB. type Client struct { - logger log.Logger + logger *slog.Logger - client influx.Client - database string - retentionPolicy string - ignoredSamples prometheus.Counter + client influx.Client + organization string + bucket string + ignoredSamples prometheus.Counter + + context context.Context } // NewClient creates a new Client. -func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client { - c, err := influx.NewHTTPClient(conf) - // Currently influx.NewClient() *should* never return an error. - if err != nil { - level.Error(logger).Log("err", err) - os.Exit(1) - } +func NewClient(logger *slog.Logger, url, authToken, organization, bucket string) *Client { + c := influx.NewClientWithOptions( + url, + authToken, + influx.DefaultOptions().SetPrecision(time.Millisecond), + ) if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Client{ - logger: logger, - client: c, - database: db, - retentionPolicy: rp, + logger: logger, + client: c, + organization: organization, + bucket: bucket, ignoredSamples: prometheus.NewCounter( prometheus.CounterOpts{ Name: "prometheus_influxdb_ignored_samples_total", Help: "The total number of samples not sent to InfluxDB due to unsupported float values (Inf, -Inf, NaN).", }, ), + + context: context.Background(), } } @@ -80,39 +85,41 @@ func tagsFromMetric(m model.Metric) map[string]string { // Write sends a batch of samples to InfluxDB via its HTTP API. 
func (c *Client) Write(samples model.Samples) error { - points := make([]*influx.Point, 0, len(samples)) + points := make([]*write.Point, 0, len(samples)) for _, s := range samples { v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - level.Debug(c.logger).Log("msg", "Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) c.ignoredSamples.Inc() continue } - p, err := influx.NewPoint( + p := influx.NewPoint( string(s.Metric[model.MetricNameLabel]), tagsFromMetric(s.Metric), map[string]interface{}{"value": v}, s.Timestamp.Time(), ) - if err != nil { - return err - } points = append(points, p) } - bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{ - Precision: "ms", - Database: c.database, - RetentionPolicy: c.retentionPolicy, - }) - if err != nil { + writeAPI := c.client.WriteAPIBlocking(c.organization, c.bucket) + writeAPI.EnableBatching() // default 5_000 + var err error + for _, p := range points { + if err = writeAPI.WritePoint(c.context, p); err != nil { + return err + } + } + if err = writeAPI.Flush(c.context); err != nil { return err } - bps.AddPoints(points) - return c.client.Write(bps) + + return nil } func (c *Client) Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) { + queryAPI := c.client.QueryAPI(c.organization) + labelsToSeries := map[string]*prompb.TimeSeries{} for _, q := range req.Queries { command, err := c.buildCommand(q) @@ -120,17 +127,18 @@ func (c *Client) Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) { return nil, err } - query := influx.NewQuery(command, c.database, "ms") - resp, err := c.client.Query(query) + resp, err := queryAPI.Query(c.context, command) if err != nil { return nil, err } - if resp.Err != "" { - return nil, errors.New(resp.Err) + if resp.Err() != nil { + return nil, resp.Err() } - if err = mergeResult(labelsToSeries, resp.Results); err != nil { - return nil, err + for resp.Next() { + if err = mergeResult(labelsToSeries, resp.Record()); err != nil { + return nil, err + } } } @@ -146,17 +154,20 @@ func (c *Client) Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) { } func (c *Client) buildCommand(q *prompb.Query) (string, error) { - matchers := make([]string, 0, len(q.Matchers)) + rangeInNs := fmt.Sprintf("start: time(v: %v), stop: time(v: %v)", q.StartTimestampMs*time.Millisecond.Nanoseconds(), q.EndTimestampMs*time.Millisecond.Nanoseconds()) + // If we don't find a metric name matcher, query all metrics // (InfluxDB measurements) by default. - from := "FROM /.+/" + measurement := `r._measurement` + matchers := make([]string, 0, len(q.Matchers)) + var joinedMatchers string for _, m := range q.Matchers { if m.Name == model.MetricNameLabel { switch m.Type { case prompb.LabelMatcher_EQ: - from = fmt.Sprintf("FROM %q.%q", c.retentionPolicy, m.Value) + measurement += fmt.Sprintf(" == \"%s\"", m.Value) case prompb.LabelMatcher_RE: - from = fmt.Sprintf("FROM %q./^%s$/", c.retentionPolicy, escapeSlashes(m.Value)) + measurement += fmt.Sprintf(" =~ /%s/", escapeSlashes(m.Value)) default: // TODO: Figure out how to support these efficiently. 
return "", errors.New("non-equal or regex-non-equal matchers are not supported on the metric name yet") @@ -166,21 +177,28 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) { switch m.Type { case prompb.LabelMatcher_EQ: - matchers = append(matchers, fmt.Sprintf("%q = '%s'", m.Name, escapeSingleQuotes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s == \"%s\"", m.Name, escapeSingleQuotes(m.Value))) case prompb.LabelMatcher_NEQ: - matchers = append(matchers, fmt.Sprintf("%q != '%s'", m.Name, escapeSingleQuotes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s != \"%s\"", m.Name, escapeSingleQuotes(m.Value))) case prompb.LabelMatcher_RE: - matchers = append(matchers, fmt.Sprintf("%q =~ /^%s$/", m.Name, escapeSlashes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s =~ /%s/", m.Name, escapeSingleQuotes(m.Value))) case prompb.LabelMatcher_NRE: - matchers = append(matchers, fmt.Sprintf("%q !~ /^%s$/", m.Name, escapeSlashes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s !~ /%s/", m.Name, escapeSingleQuotes(m.Value))) default: return "", fmt.Errorf("unknown match type %v", m.Type) } } - matchers = append(matchers, fmt.Sprintf("time >= %vms", q.StartTimestampMs)) - matchers = append(matchers, fmt.Sprintf("time <= %vms", q.EndTimestampMs)) + if len(matchers) > 0 { + joinedMatchers = fmt.Sprintf(" and %s", strings.Join(matchers, " and ")) + } - return fmt.Sprintf("SELECT value %s WHERE %v GROUP BY *", from, strings.Join(matchers, " AND ")), nil + // _measurement must be retained, otherwise "invalid metric name" shall be thrown + command := fmt.Sprintf( + "from(bucket: \"%s\") |> range(%s) |> filter(fn: (r) => %s%s)", + c.bucket, rangeInNs, measurement, joinedMatchers, + ) + + return command, nil } func escapeSingleQuotes(str string) string { @@ -191,44 +209,60 @@ func escapeSlashes(str string) string { return strings.ReplaceAll(str, `/`, `\/`) } -func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, results []influx.Result) error { - for _, r := range results { - for _, s := range r.Series { - k := concatLabels(s.Tags) - ts, ok := labelsToSeries[k] - if !ok { - ts = &prompb.TimeSeries{ - Labels: tagsToLabelPairs(s.Name, s.Tags), - } - labelsToSeries[k] = ts - } +func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, record *query.FluxRecord) error { + builtIntime := record.Time() + builtInvalue := record.Value() + builtInMeasurement := record.Measurement() + labels := record.Values() - samples, err := valuesToSamples(s.Values) - if err != nil { - return err - } + filterOutBuiltInLabels(labels) - ts.Samples = mergeSamples(ts.Samples, samples) + k := concatLabels(labels) + + ts, ok := labelsToSeries[k] + if !ok { + ts = &prompb.TimeSeries{ + Labels: tagsToLabelPairs(builtInMeasurement, labels), } + labelsToSeries[k] = ts } + + sample, err := valuesToSamples(builtIntime, builtInvalue) + if err != nil { + return err + } + + ts.Samples = mergeSamples(ts.Samples, []prompb.Sample{sample}) + return nil } -func concatLabels(labels map[string]string) string { +func filterOutBuiltInLabels(labels map[string]interface{}) { + delete(labels, "table") + delete(labels, "_start") + delete(labels, "_stop") + delete(labels, "_time") + delete(labels, "_value") + delete(labels, "_field") + delete(labels, "result") + delete(labels, "_measurement") +} + +func concatLabels(labels map[string]interface{}) string { // 0xff cannot occur in valid UTF-8 sequences, so use it // as a separator here. 
separator := "\xff" pairs := make([]string, 0, len(labels)) for k, v := range labels { - pairs = append(pairs, k+separator+v) + pairs = append(pairs, fmt.Sprintf("%s%s%v", k, separator, v)) } return strings.Join(pairs, separator) } -func tagsToLabelPairs(name string, tags map[string]string) []prompb.Label { +func tagsToLabelPairs(name string, tags map[string]interface{}) []prompb.Label { pairs := make([]prompb.Label, 0, len(tags)) for k, v := range tags { - if v == "" { + if v == nil { // If we select metrics with different sets of labels names, // InfluxDB returns *all* possible tag names on all returned // series, with empty tag values on series where they don't @@ -239,7 +273,7 @@ func tagsToLabelPairs(name string, tags map[string]string) []prompb.Label { } pairs = append(pairs, prompb.Label{ Name: k, - Value: v, + Value: fmt.Sprintf("%v", v), }) } pairs = append(pairs, prompb.Label{ @@ -249,39 +283,22 @@ func tagsToLabelPairs(name string, tags map[string]string) []prompb.Label { return pairs } -func valuesToSamples(values [][]interface{}) ([]prompb.Sample, error) { - samples := make([]prompb.Sample, 0, len(values)) - for _, v := range values { - if len(v) != 2 { - return nil, fmt.Errorf("bad sample tuple length, expected [, ], got %v", v) - } - - jsonTimestamp, ok := v[0].(json.Number) +func valuesToSamples(timestamp time.Time, value interface{}) (prompb.Sample, error) { + var valueFloat64 float64 + var valueInt64 int64 + var ok bool + if valueFloat64, ok = value.(float64); !ok { + valueInt64, ok = value.(int64) if !ok { - return nil, fmt.Errorf("bad timestamp: %v", v[0]) + return prompb.Sample{}, fmt.Errorf("unable to convert sample value to float64: %v", value) } - - jsonValue, ok := v[1].(json.Number) - if !ok { - return nil, fmt.Errorf("bad sample value: %v", v[1]) - } - - timestamp, err := jsonTimestamp.Int64() - if err != nil { - return nil, fmt.Errorf("unable to convert sample timestamp to int64: %w", err) - } - - value, err := jsonValue.Float64() - if err != nil { - return nil, fmt.Errorf("unable to convert sample value to float64: %w", err) - } - - samples = append(samples, prompb.Sample{ - Timestamp: timestamp, - Value: value, - }) + valueFloat64 = float64(valueInt64) } - return samples, nil + + return prompb.Sample{ + Timestamp: timestamp.UnixMilli(), + Value: valueFloat64, + }, nil } // mergeSamples merges two lists of sample pairs and removes duplicate diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go index a738c01dcd..f78d4db794 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go @@ -20,9 +20,7 @@ import ( "net/http/httptest" "net/url" "testing" - "time" - influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) @@ -69,13 +67,14 @@ func TestClient(t *testing.T) { } expectedBody := `testmetric,test_label=test_label_value1 value=1.23 123456789123 + testmetric,test_label=test_label_value2 value=5.1234 123456789123 ` server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(_ http.ResponseWriter, r *http.Request) { require.Equal(t, http.MethodPost, r.Method, "Unexpected method.") - require.Equal(t, "/write", r.URL.Path, "Unexpected path.") + require.Equal(t, "/api/v2/write", r.URL.Path, 
"Unexpected path.") b, err := io.ReadAll(r.Body) require.NoError(t, err, "Error reading body.") require.Equal(t, expectedBody, string(b), "Unexpected request body.") @@ -86,13 +85,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123 serverURL, err := url.Parse(server.URL) require.NoError(t, err, "Unable to parse server URL.") - conf := influx.HTTPConfig{ - Addr: serverURL.String(), - Username: "testuser", - Password: "testpass", - Timeout: time.Minute, - } - c := NewClient(nil, conf, "test_db", "default") + c := NewClient(nil, serverURL.String(), "auth_token", "test_organization", "test_bucket") err = c.Write(samples) require.NoError(t, err, "Error sending samples.") } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go index bb348aba7f..ffcbb5385a 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/main.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go @@ -17,6 +17,7 @@ package main import ( "fmt" "io" + "log/slog" "net/http" _ "net/http/pprof" "net/url" @@ -26,16 +27,13 @@ import ( "time" "github.com/alecthomas/kingpin/v2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" - influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" - "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/common/promslog" + "github.com/prometheus/common/promslog/flag" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb" @@ -45,19 +43,18 @@ import ( ) type config struct { - graphiteAddress string - graphiteTransport string - graphitePrefix string - opentsdbURL string - influxdbURL string - influxdbRetentionPolicy string - influxdbUsername string - influxdbDatabase string - influxdbPassword string - remoteTimeout time.Duration - listenAddr string - telemetryPath string - promlogConfig promlog.Config + graphiteAddress string + graphiteTransport string + graphitePrefix string + opentsdbURL string + influxdbURL string + bucket string + organization string + influxdbAuthToken string + remoteTimeout time.Duration + listenAddr string + telemetryPath string + promslogConfig promslog.Config } var ( @@ -105,11 +102,11 @@ func main() { cfg := parseFlags() http.Handle(cfg.telemetryPath, promhttp.Handler()) - logger := promlog.New(&cfg.promlogConfig) + logger := promslog.New(&cfg.promslogConfig) writers, readers := buildClients(logger, cfg) if err := serve(logger, cfg.listenAddr, writers, readers); err != nil { - level.Error(logger).Log("msg", "Failed to listen", "addr", cfg.listenAddr, "err", err) + logger.Error("Failed to listen", "addr", cfg.listenAddr, "err", err) os.Exit(1) } } @@ -119,8 +116,8 @@ func parseFlags() *config { a.HelpFlag.Short('h') cfg := &config{ - influxdbPassword: os.Getenv("INFLUXDB_PW"), - promlogConfig: promlog.Config{}, + influxdbAuthToken: os.Getenv("INFLUXDB_AUTH_TOKEN"), + promslogConfig: promslog.Config{}, } a.Flag("graphite-address", "The host:port of the Graphite server to send samples to. None, if empty."). 
@@ -133,12 +130,10 @@
 		Default("").StringVar(&cfg.opentsdbURL)
 	a.Flag("influxdb-url", "The URL of the remote InfluxDB server to send samples to. None, if empty.").
 		Default("").StringVar(&cfg.influxdbURL)
-	a.Flag("influxdb.retention-policy", "The InfluxDB retention policy to use.").
-		Default("autogen").StringVar(&cfg.influxdbRetentionPolicy)
-	a.Flag("influxdb.username", "The username to use when sending samples to InfluxDB. The corresponding password must be provided via the INFLUXDB_PW environment variable.").
-		Default("").StringVar(&cfg.influxdbUsername)
-	a.Flag("influxdb.database", "The name of the database to use for storing samples in InfluxDB.").
-		Default("prometheus").StringVar(&cfg.influxdbDatabase)
+	a.Flag("influxdb.bucket", "The InfluxDB bucket to use.").
+		Default("").StringVar(&cfg.bucket)
+	a.Flag("influxdb.organization", "The name of the organization to use for storing samples in InfluxDB.").
+		Default("").StringVar(&cfg.organization)
 	a.Flag("send-timeout", "The timeout to use when sending samples to the remote storage.").
 		Default("30s").DurationVar(&cfg.remoteTimeout)
 	a.Flag("web.listen-address", "Address to listen on for web endpoints.").
@@ -146,11 +141,11 @@
 	a.Flag("web.telemetry-path", "Path under which to expose metrics.").
 		Default("/metrics").StringVar(&cfg.telemetryPath)

-	flag.AddFlags(a, &cfg.promlogConfig)
+	flag.AddFlags(a, &cfg.promslogConfig)
 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
-		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
+		fmt.Fprintf(os.Stderr, "Error parsing commandline arguments: %s\n", err)
 		a.Usage(os.Args[1:])
 		os.Exit(2)
 	}
@@ -168,19 +163,19 @@ type reader interface {
 	Name() string
 }

-func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) {
+func buildClients(logger *slog.Logger, cfg *config) ([]writer, []reader) {
 	var writers []writer
 	var readers []reader
 	if cfg.graphiteAddress != "" {
 		c := graphite.NewClient(
-			log.With(logger, "storage", "Graphite"),
+			logger.With("storage", "Graphite"),
 			cfg.graphiteAddress, cfg.graphiteTransport,
 			cfg.remoteTimeout, cfg.graphitePrefix)
 		writers = append(writers, c)
 	}
 	if cfg.opentsdbURL != "" {
 		c := opentsdb.NewClient(
-			log.With(logger, "storage", "OpenTSDB"),
+			logger.With("storage", "OpenTSDB"),
 			cfg.opentsdbURL,
 			cfg.remoteTimeout,
 		)
@@ -189,34 +184,29 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) {
 	if cfg.influxdbURL != "" {
 		url, err := url.Parse(cfg.influxdbURL)
 		if err != nil {
-			level.Error(logger).Log("msg", "Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err)
+			logger.Error("Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err)
 			os.Exit(1)
 		}
-		conf := influx.HTTPConfig{
-			Addr:     url.String(),
-			Username: cfg.influxdbUsername,
-			Password: cfg.influxdbPassword,
-			Timeout:  cfg.remoteTimeout,
-		}
 		c := influxdb.NewClient(
-			log.With(logger, "storage", "InfluxDB"),
-			conf,
-			cfg.influxdbDatabase,
-			cfg.influxdbRetentionPolicy,
+			logger.With("storage", "InfluxDB"),
+			url.String(),
+			cfg.influxdbAuthToken,
+			cfg.organization,
+			cfg.bucket,
 		)
 		prometheus.MustRegister(c)
 		writers = append(writers, c)
 		readers = append(readers, c)
 	}
-	level.Info(logger).Log("msg", "Starting up...")
+	logger.Info("Starting up...")
 	return writers, readers
 }

-func serve(logger log.Logger, addr string, writers []writer, readers []reader) error {
+func serve(logger *slog.Logger, addr string, writers []writer, readers []reader) error {
	http.HandleFunc("/write", func(w
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
index abb1d0b7d3..433c70527a 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
@@ -19,13 +19,12 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"log/slog"
 	"math"
 	"net/http"
 	"net/url"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/common/model"
 )
 
@@ -36,14 +35,14 @@ const (
 
 // Client allows sending batches of Prometheus samples to OpenTSDB.
 type Client struct {
-	logger log.Logger
+	logger *slog.Logger
 
 	url     string
 	timeout time.Duration
 }
 
 // NewClient creates a new Client.
-func NewClient(logger log.Logger, url string, timeout time.Duration) *Client {
+func NewClient(logger *slog.Logger, url string, timeout time.Duration) *Client {
 	return &Client{
 		logger:  logger,
 		url:     url,
@@ -78,7 +77,7 @@ func (c *Client) Write(samples model.Samples) error {
 	for _, s := range samples {
 		v := float64(s.Value)
 		if math.IsNaN(v) || math.IsInf(v, 0) {
-			level.Debug(c.logger).Log("msg", "Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s)
+			c.logger.Debug("Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s)
 			continue
 		}
 		metric := TagValue(s.Metric[model.MetricNameLabel])
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go
index a30448e766..bc9703c88c 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go
@@ -43,14 +43,14 @@ func TestMarshalStoreSamplesRequest(t *testing.T) {
 		Value:     3.1415,
 		Tags:      tagsFromMetric(metric),
 	}
-	expectedJSON := []byte(`{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}`)
+	expectedJSON := `{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}`
 
 	resultingJSON, err := json.Marshal(request)
 	require.NoError(t, err, "Marshal(request) resulted in err.")
-	require.Equal(t, expectedJSON, resultingJSON)
+	require.JSONEq(t, expectedJSON, string(resultingJSON))
 
 	var unmarshaledRequest StoreSamplesRequest
-	err = json.Unmarshal(expectedJSON, &unmarshaledRequest)
+	err = json.Unmarshal([]byte(expectedJSON), &unmarshaledRequest)
 	require.NoError(t, err, "Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err.")
 	require.Equal(t, request, unmarshaledRequest)
 }
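The tagvalue.go hunk below replaces result.WriteString(fmt.Sprintf("_%X", b)) with fmt.Fprintf(result, "_%X", b): same output, but the formatted bytes go straight into the buffer instead of through a temporary string. A standalone illustration of the difference (escapeByte is a hypothetical stand-in for the escaping loop):

package main

import (
	"bytes"
	"fmt"
)

// escapeByte appends the "_%X" escape for b to result twice, once per style.
func escapeByte(result *bytes.Buffer, b byte) {
	// Allocates an intermediate string, then copies it into the buffer.
	result.WriteString(fmt.Sprintf("_%X", b))
	// Writes the formatted bytes directly; *bytes.Buffer is an io.Writer.
	fmt.Fprintf(result, "_%X", b)
}

func main() {
	var buf bytes.Buffer
	escapeByte(&buf, ':')
	fmt.Println(buf.String()) // _3A_3A
}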
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
index 99cef0b242..6a691778af 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
@@ -78,7 +78,7 @@ func (tv TagValue) MarshalJSON() ([]byte, error) {
 		case b == ':':
 			result.WriteString("_.")
 		default:
-			result.WriteString(fmt.Sprintf("_%X", b))
+			fmt.Fprintf(result, "_%X", b)
 		}
 	}
 	result.WriteByte('"')
diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet
index 563daab801..9a6de90d82 100644
--- a/documentation/prometheus-mixin/alerts.libsonnet
+++ b/documentation/prometheus-mixin/alerts.libsonnet
@@ -84,8 +84,8 @@
           severity: 'warning',
         },
         annotations: {
-          summary: 'Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.',
-          description: '{{ printf "%%.1f" $value }}%% errors while sending alerts from Prometheus %(prometheusName)s to Alertmanager {{$labels.alertmanager}}.'
% $._config, + summary: 'More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors.', + description: '{{ printf "%%.1f" $value }}%% of alerts sent by Prometheus %(prometheusName)s to Alertmanager {{$labels.alertmanager}} were affected by errors.' % $._config, }, }, { diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index 2bdd168cc9..adf5da5c12 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -1,438 +1,826 @@ -local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet'; -local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet'; +local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet'; local dashboard = grafana.dashboard; -local row = grafana.row; -local singlestat = grafana.singlestat; -local prometheus = grafana.prometheus; -local graphPanel = grafana.graphPanel; -local tablePanel = grafana.tablePanel; -local template = grafana.template; +local prometheus = grafana.query.prometheus; +local variable = dashboard.variable; +local panel = grafana.panel; +local row = panel.row; + { grafanaDashboards+:: { + + local panelTimeSeriesStdOptions = + {} + + panel.timeSeries.queryOptions.withDatasource('prometheus', '$datasource') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(10) + + panel.timeSeries.fieldConfig.defaults.custom.withShowPoints('never') + + panel.timeSeries.options.tooltip.withMode('multi') + , + + local panelTimeSeriesStacking = + {} + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(100) + + panel.timeSeries.fieldConfig.defaults.custom.withLineWidth(0) + + panel.timeSeries.fieldConfig.defaults.custom.stacking.withMode('normal') + , + 'prometheus.json': + local showMultiCluster = $._config.showMultiCluster; - local dashboard = g.dashboard( - '%(prefix)sOverview' % $._config.grafanaPrometheus - ); - local templatedDashboard = if showMultiCluster then - dashboard - .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel) - .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job') - .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance') - else - dashboard - .addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job') - .addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance'); - templatedDashboard - .addRow( - g.row('Prometheus Stats') - .addPanel( - g.panel('Prometheus Stats') + - g.tablePanel(if showMultiCluster then [ - 'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', - 'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', - ] else [ - 'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})', - 'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})', - ], { - cluster: { alias: if showMultiCluster then 'Cluster' else '' }, - job: { alias: 'Job' }, - instance: { alias: 'Instance' }, - version: { alias: 'Version' }, - 'Value #A': { alias: 'Count', type: 'hidden' }, - 'Value #B': { alias: 'Uptime', type: 'number', unit: 's' }, - }) - ) - ) - .addRow( - g.row('Discovery') - .addPanel( - 
g.panel('Target Sync') + - g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3' - else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3', - if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}' - else '{{scrape_job}}') + - { yaxes: g.yaxes('ms') } - ) - .addPanel( - g.panel('Targets') + - g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})' - else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})', - if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}' - else 'Targets') + - g.stack - ) - ) - .addRow( - g.row('Retrieval') - .addPanel( - g.panel('Average Scrape Interval Duration') + - g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' - else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3', - if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured' - else '{{interval}} configured') + - { yaxes: g.yaxes('ms') } - ) - .addPanel( - g.panel('Scrape failures') + - g.queryPanel(if showMultiCluster then [ - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - ] else [ - 'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))', - ], if showMultiCluster then [ - 'exceeded body size limit: {{cluster}} {{job}} {{instance}}', - 'exceeded sample limit: {{cluster}} {{job}} {{instance}}', - 'duplicate timestamp: {{cluster}} {{job}} {{instance}}', - 'out of bounds: {{cluster}} {{job}} {{instance}}', - 'out of order: {{cluster}} {{job}} {{instance}}', - ] else [ - 'exceeded body size limit: {{job}}', - 'exceeded sample limit: {{job}}', - 'duplicate timestamp: {{job}}', - 'out of bounds: {{job}}', - 'out of order: {{job}}', - ]) + - g.stack - ) - .addPanel( - g.panel('Appended Samples') + - g.queryPanel(if showMultiCluster then 
'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' - else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])', - if showMultiCluster then '{{cluster}} {{job}} {{instance}}' - else '{{job}} {{instance}}') + - g.stack - ) - ) - .addRow( - g.row('Storage') - .addPanel( - g.panel('Head Series') + - g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' - else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}', - if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series' - else '{{job}} {{instance}} head series') + - g.stack - ) - .addPanel( - g.panel('Head Chunks') + - g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' - else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}', - if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks' - else '{{job}} {{instance}} head chunks') + - g.stack - ) - ) - .addRow( - g.row('Query') - .addPanel( - g.panel('Query Rate') + - g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' - else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', - if showMultiCluster then '{{cluster}} {{job}} {{instance}}' - else '{{job}} {{instance}}') + - g.stack, - ) - .addPanel( - g.panel('Stage Duration') + - g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' - else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3', - if showMultiCluster then '{{slice}}' - else '{{slice}}') + - { yaxes: g.yaxes('ms') } + - g.stack, - ) - ) + { - tags: $._config.grafanaPrometheus.tags, - refresh: $._config.grafanaPrometheus.refresh, - }, + + local datasourceVariable = + variable.datasource.new('datasource', 'prometheus') + + variable.datasource.generalOptions.withLabel('Data source') + + variable.datasource.generalOptions.withCurrent('default') + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local clusterVariable = + variable.query.new('cluster') + + variable.query.generalOptions.withLabel('cluster') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.withSort(type='alphabetical', asc=false) + + variable.query.selectionOptions.withIncludeAll(true, '.+') + + variable.query.selectionOptions.withMulti(true) + + variable.query.generalOptions.withCurrent('$__all') + + variable.query.queryTypes.withLabelValues($._config.clusterLabel, metric='prometheus_build_info{%(prometheusSelector)s}' % $._config) + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local jobVariable = + variable.query.new('job') + + variable.query.generalOptions.withLabel('job') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.withSort(type='alphabetical', asc=false) + + variable.query.selectionOptions.withIncludeAll(true, '.+') + + variable.query.selectionOptions.withMulti(true) + + if showMultiCluster then + variable.query.queryTypes.withLabelValues('job', 
metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config) + else + variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(prometheusSelector)s}' % $._config) + ; + + local instanceVariable = + variable.query.new('instance') + + variable.query.generalOptions.withLabel('instance') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.withSort(type='alphabetical', asc=false) + + variable.query.selectionOptions.withIncludeAll(true, '.+') + + variable.query.selectionOptions.withMulti(true) + + if showMultiCluster then + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job"}' % $._config) + else + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{job=~"$job"}') + ; + + local prometheusStats = + panel.table.new('Prometheus Stats') + + panel.table.queryOptions.withDatasource('prometheus', '$datasource') + + panel.table.standardOptions.withUnit('short') + + panel.table.standardOptions.withDecimals(2) + + panel.table.standardOptions.withDisplayName('') + + panel.table.standardOptions.withOverrides([ + panel.table.standardOptions.override.byName.new('Time') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Time') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('custom.hidden', 'true'), + panel.table.standardOptions.override.byName.new('cluster') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2) + + if showMultiCluster then panel.table.standardOptions.override.byName.withProperty('displayName', 'Cluster') else {}, + panel.table.standardOptions.override.byName.new('job') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2) + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Job'), + panel.table.standardOptions.override.byName.new('instance') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Instance') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2), + panel.table.standardOptions.override.byName.new('version') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Version') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2), + panel.table.standardOptions.override.byName.new('Value #A') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Count') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2) + + 
panel.table.standardOptions.override.byName.withProperty('custom.hidden', 'true'), + panel.table.standardOptions.override.byName.new('Value #B') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Uptime') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 's'), + ]) + + if showMultiCluster then + panel.table.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'count by (cluster, job, instance, version) (prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + prometheus.new( + '$datasource', + 'max by (cluster, job, instance) (time() - process_start_time_seconds{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + ]) + else + panel.table.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})' + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + prometheus.new( + '$datasource', + 'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})' + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + ]) + ; + + local targetSync = + panel.timeSeries.new('Target Sync') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panel.timeSeries.standardOptions.withUnit('ms') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum(rate(prometheus_target_sync_length_seconds_sum{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (%(clusterLabel)s, job, scrape_job, instance) * 1e3' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}:{{scrape_job}}' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{scrape_job}}'), + ]) + ; + + local targets = + panel.timeSeries.new('Targets') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum by (%(clusterLabel)s, job, instance) (prometheus_sd_discovered_targets{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"})' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('Targets'), + ]) + ; 
+ + local averageScrapeIntervalDuration = + panel.timeSeries.new('Average Scrape Interval Duration') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panel.timeSeries.standardOptions.withUnit('ms') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_target_interval_length_seconds_sum{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}} {{interval}} configured' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{interval}} configured'), + ]) + ; + + local scrapeFailures = + panel.timeSeries.new('Scrape failures') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded body size limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), + prometheus.new( + '$datasource', + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded sample limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), + prometheus.new( + '$datasource', + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('duplicate timestamp: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), + prometheus.new( + '$datasource', + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of bounds: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), + prometheus.new( + '$datasource', + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of order: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + 
'$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded body size limit: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded sample limit: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('duplicate timestamp: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of bounds: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of order: {{job}}'), + ]) + ; + + local appendedSamples = + panel.timeSeries.new('Appended Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_tsdb_head_samples_appended_total{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}}'), + ]) + ; + + local headSeries = + panel.timeSeries.new('Head Series') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_series{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head series' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}} head series'), + ]) + ; + + local headChunks = + panel.timeSeries.new('Head Chunks') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_chunks{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config + ) + + prometheus.withFormat('time_series') 
+ + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head chunks' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}} head chunks'), + ]) + ; + + local queryRate = + panel.timeSeries.new('Query Rate') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_engine_query_duration_seconds_count{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}}'), + ]) + ; + + local stageDuration = + panel.timeSeries.new('Stage Duration') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('ms') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{slice}}'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{slice}}'), + ]) + ; + + dashboard.new('%(prefix)sOverview' % $._config.grafanaPrometheus) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags($._config.grafanaPrometheus.tags) + + dashboard.withUid('9fa0d141-d019-4ad7-8bc5-42196ee308bd') + + dashboard.timepicker.withRefreshIntervals($._config.grafanaPrometheus.refresh) + + dashboard.withVariables(std.prune([ + datasourceVariable, + if showMultiCluster then clusterVariable, + jobVariable, + instanceVariable, + ])) + + dashboard.withPanels( + grafana.util.grid.makeGrid([ + row.new('Prometheus Stats') + + row.withPanels([ + prometheusStats, + ]), + ], panelWidth=24, panelHeight=7) + + + grafana.util.grid.makeGrid([ + row.new('Discovery') + + row.withPanels([ + targetSync, + targets, + ]), + ], panelWidth=12, panelHeight=7, startY=8) + + + grafana.util.grid.makeGrid([ + row.new('Retrieval') + + row.withPanels([ + averageScrapeIntervalDuration, + scrapeFailures, + appendedSamples, + ]), + ], panelWidth=8, panelHeight=7, startY=16) + + + grafana.util.grid.makeGrid([ + row.new('Storage') + + row.withPanels([ + headSeries, + headChunks, + ]), + row.new('Query') + + row.withPanels([ + queryRate, + stageDuration, + ]), + ], panelWidth=12, panelHeight=7, startY=24) + ), // 
Remote write specific dashboard. 'prometheus-remote-write.json': + + local datasourceVariable = + variable.datasource.new('datasource', 'prometheus') + + variable.datasource.generalOptions.withCurrent('default') + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local clusterVariable = + variable.query.new('cluster') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.selectionOptions.withIncludeAll(true) + + variable.query.generalOptions.withCurrent('$__all') + + variable.query.queryTypes.withLabelValues($._config.clusterLabel, metric='prometheus_build_info') + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local instanceVariable = + variable.query.new('instance') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.selectionOptions.withIncludeAll(true) + + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config) + ; + + local urlVariable = + variable.query.new('url') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.selectionOptions.withIncludeAll(true) + + variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config) + ; + local timestampComparison = - graphPanel.new( - 'Highest Timestamp In vs. Highest Timestamp Sent', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - ||| - ( - prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"} - - - ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0) - ) - |||, - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}', - )); + panel.timeSeries.new('Highest Timestamp In vs. 
Highest Timestamp Sent') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + ( + prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"} + - + ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} != 0) + ) + ||| % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}::{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local timestampComparisonRate = - graphPanel.new( - 'Rate[5m]', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - ||| - clamp_min( - rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) - - - ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) - , 0) - |||, - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}', - )); + panel.timeSeries.new('Rate[5m]') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + clamp_min( + rate(prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m]) + - + ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) + , 0) + ||| % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local samplesRate = - graphPanel.new( - 'Rate, in vs. succeeded or dropped [5m]', - datasource='$datasource', - span=12, - ) - .addTarget(prometheus.target( - ||| - rate( - prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m]) - - - ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - - - (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - |||, - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Rate, in vs. 
succeeded or dropped [5m]') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + rate( + prometheus_remote_storage_samples_in_total{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m]) + - + ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + - + (rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + ||| % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local currentShards = - graphPanel.new( - 'Current Shards', - datasource='$datasource', - span=12, - min_span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Current Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local maxShards = - graphPanel.new( - 'Max Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Max Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards_max{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local minShards = - graphPanel.new( - 'Min Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Min Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards_min{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local 
desiredShards = - graphPanel.new( - 'Desired Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Desired Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards_desired{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local shardsCapacity = - graphPanel.new( - 'Shard Capacity', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); - + panel.timeSeries.new('Shard Capacity') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shard_capacity{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local pendingSamples = - graphPanel.new( - 'Pending Samples', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Pending Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_pending_samples{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local walSegment = - graphPanel.new( - 'TSDB Current Segment', - datasource='$datasource', - span=6, - formatY1='none', - ) - .addTarget(prometheus.target( - 'prometheus_tsdb_wal_segment_current{cluster=~"$cluster", instance=~"$instance"}', - legendFormat='{{cluster}}:{{instance}}' - )); + panel.timeSeries.new('TSDB Current Segment') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.standardOptions.withUnit('none') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_wal_segment_current{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + 
prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}}' % $._config), + ]); local queueSegment = - graphPanel.new( - 'Remote Write Current Segment', - datasource='$datasource', - span=6, - formatY1='none', - ) - .addTarget(prometheus.target( - 'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}', - legendFormat='{{cluster}}:{{instance}} {{consumer}}' - )); + panel.timeSeries.new('Remote Write Current Segment') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.standardOptions.withUnit('none') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_wal_watcher_current_segment{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{consumer}}' % $._config), + ]); local droppedSamples = - graphPanel.new( - 'Dropped Samples', - datasource='$datasource', - span=3, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Dropped Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local failedSamples = - graphPanel.new( - 'Failed Samples', - datasource='$datasource', - span=3, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Failed Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_remote_storage_failed_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), + ]); local retriedSamples = - graphPanel.new( - 'Retried Samples', - datasource='$datasource', - span=3, - ) - .addTarget(prometheus.target( - 
'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])',
-        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
-      ));
+      panel.timeSeries.new('Retried Samples')
+      + panelTimeSeriesStdOptions
+      + panel.timeSeries.options.tooltip.withMode('single')
+      + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0)
+      + panel.timeSeries.queryOptions.withTargets([
+        prometheus.new(
+          '$datasource',
+          'rate(prometheus_remote_storage_retried_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config
+        )
+        + prometheus.withFormat('time_series')
+        + prometheus.withIntervalFactor(2)
+        + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
+      ]);

     local enqueueRetries =
-      graphPanel.new(
-        'Enqueue Retries',
-        datasource='$datasource',
-        span=3,
-      )
-      .addTarget(prometheus.target(
-        'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])',
-        legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
-      ));
+      panel.timeSeries.new('Enqueue Retries')
+      + panelTimeSeriesStdOptions
+      + panel.timeSeries.options.tooltip.withMode('single')
+      + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0)
+      + panel.timeSeries.queryOptions.withTargets([
+        prometheus.new(
+          '$datasource',
+          'rate(prometheus_remote_storage_enqueue_retries_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config
+        )
+        + prometheus.withFormat('time_series')
+        + prometheus.withIntervalFactor(2)
+        + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
+      ]);

-    dashboard.new(
-      title='%(prefix)sRemote Write' % $._config.grafanaPrometheus,
-      editable=true
-    )
-    .addTemplate(
-      {
-        hide: 0,
-        label: null,
-        name: 'datasource',
-        options: [],
-        query: 'prometheus',
-        refresh: 1,
-        regex: '',
-        type: 'datasource',
-      },
-    )
-    .addTemplate(
-      template.new(
-        'cluster',
-        '$datasource',
-        'label_values(prometheus_build_info, cluster)' % $._config,
-        refresh='time',
-        current={
-          selected: true,
-          text: 'All',
-          value: '$__all',
-        },
-        includeAll=true,
-      )
-    )
-    .addTemplate(
-      template.new(
-        'instance',
-        '$datasource',
-        'label_values(prometheus_build_info{cluster=~"$cluster"}, instance)' % $._config,
-        refresh='time',
-        current={
-          selected: true,
-          text: 'All',
-          value: '$__all',
-        },
-        includeAll=true,
-      )
-    )
-    .addTemplate(
-      template.new(
-        'url',
-        '$datasource',
-        'label_values(prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}, url)' % $._config,
-        refresh='time',
-        includeAll=true,
-      )
-    )
-    .addRow(
-      row.new('Timestamps')
-      .addPanel(timestampComparison)
-      .addPanel(timestampComparisonRate)
-    )
-    .addRow(
-      row.new('Samples')
-      .addPanel(samplesRate)
-    )
-    .addRow(
-      row.new(
-        'Shards'
-      )
-      .addPanel(currentShards)
-      .addPanel(maxShards)
-      .addPanel(minShards)
-      .addPanel(desiredShards)
-    )
-    .addRow(
-      row.new('Shard Details')
-      .addPanel(shardsCapacity)
-      .addPanel(pendingSamples)
-    )
-    .addRow(
-      row.new('Segments')
-      .addPanel(walSegment)
-      .addPanel(queueSegment)
-    )
-    .addRow(
-      row.new('Misc. Rates')
-      .addPanel(droppedSamples)
-      .addPanel(failedSamples)
-      .addPanel(retriedSamples)
-      .addPanel(enqueueRetries)
-    ) + {
-      tags: $._config.grafanaPrometheus.tags,
-      refresh: $._config.grafanaPrometheus.refresh,
-    },
+    dashboard.new('%(prefix)sRemote Write' % $._config.grafanaPrometheus)
+    + dashboard.time.withFrom('now-1h')
+    + dashboard.withTags($._config.grafanaPrometheus.tags)
+    + dashboard.timepicker.withRefreshIntervals($._config.grafanaPrometheus.refresh)
+    + dashboard.withVariables([
+      datasourceVariable,
+      clusterVariable,
+      instanceVariable,
+      urlVariable,
+    ])
+    + dashboard.withPanels(
+      grafana.util.grid.makeGrid([
+        row.new('Timestamps')
+        + row.withPanels([
+          timestampComparison,
+          timestampComparisonRate,
+        ]),
+      ], panelWidth=12, panelHeight=7)
+      +
+      grafana.util.grid.makeGrid([
+        row.new('Samples')
+        + row.withPanels([
+          samplesRate
+          + panel.timeSeries.gridPos.withW(24),
+        ]),
+        row.new('Shards'),
+      ], panelWidth=24, panelHeight=7, startY=8)
+      +
+      grafana.util.grid.wrapPanels([
+        currentShards
+        + panel.timeSeries.gridPos.withW(24),
+        maxShards,
+        minShards,
+        desiredShards,
+      ], panelWidth=8, panelHeight=7, startY=16)
+      +
+      grafana.util.grid.makeGrid([
+        row.new('Shard Details')
+        + row.withPanels([
+          shardsCapacity,
+          pendingSamples,
+        ]),
+        row.new('Segments')
+        + row.withPanels([
+          walSegment,
+          queueSegment,
+        ]),
+      ], panelWidth=12, panelHeight=7, startY=24)
+      +
+      grafana.util.grid.makeGrid([
+        row.new('Misc. Rates')
+        + row.withPanels([
+          droppedSamples,
+          failedSamples,
+          retriedSamples,
+          enqueueRetries,
+        ]),
+      ], panelWidth=6, panelHeight=7, startY=40)
+    ),
 },
}
diff --git a/documentation/prometheus-mixin/jsonnetfile.json b/documentation/prometheus-mixin/jsonnetfile.json
index 1c64fd0151..2d56d91245 100644
--- a/documentation/prometheus-mixin/jsonnetfile.json
+++ b/documentation/prometheus-mixin/jsonnetfile.json
@@ -4,20 +4,11 @@
     {
       "source": {
         "git": {
-          "remote": "https://github.com/grafana/grafonnet-lib.git",
-          "subdir": "grafonnet"
+          "remote": "https://github.com/grafana/grafonnet.git",
+          "subdir": "gen/grafonnet-latest"
         }
       },
-      "version": "master"
-    },
-    {
-      "source": {
-        "git": {
-          "remote": "https://github.com/grafana/jsonnet-libs.git",
-          "subdir": "grafana-builder"
-        }
-      },
-      "version": "master"
+      "version": "main"
     }
   ],
   "legacyImports": false
diff --git a/go.mod b/go.mod
index 4107f3a099..03abd8662d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,114 +1,137 @@
 module github.com/prometheus/prometheus

-go 1.21.0
-
-toolchain go1.22.5
+go 1.23.0

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
 	github.com/Code-Hex/go-generics-cache v1.5.1
-	github.com/KimMachineGun/automemlimit v0.6.1
+	github.com/KimMachineGun/automemlimit v0.7.3
 	github.com/alecthomas/kingpin/v2 v2.4.0
-	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
-	github.com/aws/aws-sdk-go v1.53.16
+	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
+	github.com/aws/aws-sdk-go v1.55.7
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/dennwc/varint v1.0.0
-	github.com/digitalocean/godo v1.117.0
-	github.com/docker/docker v26.1.3+incompatible
-	github.com/edsrzf/mmap-go v1.1.0
-	github.com/envoyproxy/go-control-plane v0.12.0
-	github.com/envoyproxy/protoc-gen-validate v1.0.4
+	github.com/digitalocean/godo v1.152.0
+	github.com/docker/docker v28.2.2+incompatible
+	github.com/edsrzf/mmap-go v1.2.0
+	github.com/envoyproxy/go-control-plane/envoy v1.32.4
+	github.com/envoyproxy/protoc-gen-validate v1.2.1
 	github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
-	github.com/fsnotify/fsnotify v1.7.0
-	github.com/go-kit/log v0.2.1
-	github.com/go-logfmt/logfmt v0.6.0
+	github.com/fsnotify/fsnotify v1.8.0
 	github.com/go-openapi/strfmt v0.23.0
-	github.com/go-zookeeper/zk v1.0.3
+	github.com/go-zookeeper/zk v1.0.4
 	github.com/gogo/protobuf v1.3.2
-	github.com/golang/snappy v0.0.4
-	github.com/google/go-cmp v0.6.0
-	github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
+	github.com/golang/snappy v1.0.0
+	github.com/google/go-cmp v0.7.0
+	github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a
 	github.com/google/uuid v1.6.0
-	github.com/gophercloud/gophercloud v1.12.0
+	github.com/gophercloud/gophercloud/v2 v2.7.0
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
-	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.29.1
-	github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
-	github.com/hetznercloud/hcloud-go/v2 v2.9.0
-	github.com/ionos-cloud/sdk-go/v6 v6.1.11
+	github.com/hashicorp/consul/api v1.32.0
+	github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec
+	github.com/hetznercloud/hcloud-go/v2 v2.21.1
+	github.com/ionos-cloud/sdk-go/v6 v6.3.4
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.17.8
+	github.com/klauspost/compress v1.18.0
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-	github.com/linode/linodego v1.35.0
-	github.com/miekg/dns v1.1.59
+	github.com/linode/linodego v1.52.1
+	github.com/miekg/dns v1.1.66
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
 	github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
-	github.com/oklog/run v1.1.0
-	github.com/oklog/ulid v1.3.1
-	github.com/ovh/go-ovh v1.5.1
-	github.com/prometheus/alertmanager v0.27.0
-	github.com/prometheus/client_golang v1.19.1
-	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.54.0
+	github.com/oklog/run v1.2.0
+	github.com/oklog/ulid/v2 v2.1.1
+	github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0
+	github.com/ovh/go-ovh v1.8.0
+	github.com/prometheus/alertmanager v0.28.1
+	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/client_model v0.6.2
+	github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3
 	github.com/prometheus/common/assets v0.2.0
-	github.com/prometheus/common/sigv4 v0.1.0
-	github.com/prometheus/exporter-toolkit v0.11.0
-	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
+	github.com/prometheus/exporter-toolkit v0.14.0
+	github.com/prometheus/sigv4 v0.2.0
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
-	github.com/stretchr/testify v1.9.0
+	github.com/stackitcloud/stackit-sdk-go/core v0.17.2
+	github.com/stretchr/testify v1.10.0
 	github.com/vultr/govultr/v2 v2.17.2
-	go.opentelemetry.io/collector/pdata v1.11.0
-	go.opentelemetry.io/collector/semconv v0.104.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
-	go.opentelemetry.io/otel v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
-	go.opentelemetry.io/otel/sdk v1.27.0
-	go.opentelemetry.io/otel/trace v1.27.0
+	go.opentelemetry.io/collector/component v1.34.0
+	go.opentelemetry.io/collector/consumer v1.34.0
+	go.opentelemetry.io/collector/pdata v1.34.0
+	go.opentelemetry.io/collector/processor v1.34.0
+	go.opentelemetry.io/collector/semconv v0.128.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
+	go.opentelemetry.io/otel v1.36.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
+	go.opentelemetry.io/otel/metric v1.36.0
+	go.opentelemetry.io/otel/sdk v1.36.0
+	go.opentelemetry.io/otel/trace v1.36.0
 	go.uber.org/atomic v1.11.0
-	go.uber.org/automaxprocs v1.5.3
+	go.uber.org/automaxprocs v1.6.0
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
-	golang.org/x/net v0.26.0
-	golang.org/x/oauth2 v0.21.0
-	golang.org/x/sync v0.7.0
-	golang.org/x/sys v0.21.0
-	golang.org/x/text v0.16.0
-	golang.org/x/time v0.5.0
-	golang.org/x/tools v0.22.0
-	google.golang.org/api v0.183.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
-	google.golang.org/grpc v1.64.0
-	google.golang.org/protobuf v1.34.2
+	golang.org/x/oauth2 v0.30.0
+	golang.org/x/sync v0.15.0
+	golang.org/x/sys v0.33.0
+	golang.org/x/text v0.26.0
+	google.golang.org/api v0.238.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
+	google.golang.org/grpc v1.73.0
+	google.golang.org/protobuf v1.36.6
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.29.3
-	k8s.io/apimachinery v0.29.3
-	k8s.io/client-go v0.29.3
+	k8s.io/api v0.32.3
+	k8s.io/apimachinery v0.32.3
+	k8s.io/client-go v0.32.3
 	k8s.io/klog v1.0.0
-	k8s.io/klog/v2 v2.120.1
+	k8s.io/klog/v2 v2.130.1
 )

 require (
-	cloud.google.com/go/auth v0.5.1 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
-	cloud.google.com/go/compute/metadata v0.3.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
+	github.com/aws/smithy-go v1.22.2 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
+	github.com/containerd/errdefs/pkg v0.3.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/moby/sys/atomicwriter v0.1.0 // indirect
+	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
+	go.opentelemetry.io/collector/featuregate v1.34.0 // indirect
+	go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect
+	go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
+	go.opentelemetry.io/otel/log v0.12.2 // indirect
+)
+
+require (
+	cloud.google.com/go/auth v0.16.2 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+	cloud.google.com/go/compute/metadata v0.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
-	github.com/cilium/ebpf v0.11.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
-	github.com/containerd/cgroups/v3 v3.0.3 // indirect
+	github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -116,35 +139,31 @@ require (
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/fatih/color v1.16.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/ghodss/yaml v1.0.0 // indirect
-	github.com/go-kit/kit v0.12.0 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-openapi/analysis v0.22.2 // indirect
+	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
-	github.com/go-openapi/jsonpointer v0.20.2 // indirect
-	github.com/go-openapi/jsonreference v0.20.4 // indirect
-	github.com/go-openapi/loads v0.21.5 // indirect
-	github.com/go-openapi/spec v0.20.14 // indirect
-	github.com/go-openapi/swag v0.22.9 // indirect
-	github.com/go-openapi/validate v0.23.0 // indirect
-	github.com/go-resty/resty/v2 v2.13.1 // indirect
-	github.com/godbus/dbus/v5 v5.0.4 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/golang/glog v1.2.0 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/loads v0.22.0 // indirect
+	github.com/go-openapi/spec v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/validate v0.24.0 // indirect
+	github.com/go-resty/resty/v2 v2.16.5 // indirect
+	github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/s2a-go v0.1.7 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
+	github.com/google/s2a-go v0.1.9 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+	github.com/googleapis/gax-go/v2 v2.14.2 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -155,56 +174,68 @@ require (
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
-	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/julienschmidt/httprouter v1.3.0 // indirect
+	github.com/knadh/koanf/maps v0.1.2 // indirect
+	github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
+	github.com/knadh/koanf/v2 v2.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mdlayher/socket v0.4.1 // indirect
+	github.com/mdlayher/vsock v1.2.1 // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.2 // indirect
-	github.com/opencontainers/runtime-spec v1.0.2 // indirect
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
-	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.27.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
-	golang.org/x/crypto v0.24.0 // indirect
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.18.0 // indirect
-	golang.org/x/term v0.21.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/collector/confmap v1.34.0 // indirect
+	go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 // indirect
+	go.opentelemetry.io/collector/pipeline v0.128.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.6.0 // indirect
+	go.uber.org/zap v1.27.0 // indirect
+	golang.org/x/crypto v0.39.0 // indirect
+	golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
+	golang.org/x/mod v0.25.0 // indirect
+	golang.org/x/net v0.41.0 // indirect
+	golang.org/x/term v0.32.0 // indirect
+	golang.org/x/time v0.12.0 // indirect
+	golang.org/x/tools v0.33.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
-	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
-	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
-)
-
-replace (
-	k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0
-	k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0
+	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )

 // Exclude linodego v1.0.0 as it is no longer published on github.
@@ -215,3 +246,6 @@ exclude (
 	github.com/grpc-ecosystem/grpc-gateway v1.14.7
 	google.golang.org/api v0.30.0
 )
+
+// Pin until https://github.com/fsnotify/fsnotify/issues/656 is resolved.
+replace github.com/fsnotify/fsnotify v1.8.0 => github.com/fsnotify/fsnotify v1.7.0
diff --git a/go.sum b/go.sum
index dc4a7ecfd7..c540ec490c 100644
--- a/go.sum
+++ b/go.sum
@@ -1,47 +1,17 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
-cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
-cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
+cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
+cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
+cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@@ -52,49 +22,61 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
 github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
-github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
-github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/KimMachineGun/automemlimit v0.7.3 h1:oPgMp0bsWez+4fvgSa11Rd9nUDrd8RLtDjBoT3ro+/A=
+github.com/KimMachineGun/automemlimit v0.7.3/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
-github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
 github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
 github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
-github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
+github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
+github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
+github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
+github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
+github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
+github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -102,39 +84,23 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
+github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
-github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
-github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -142,39 +108,28 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
-github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/digitalocean/godo v1.152.0 h1:WRgkPMogZSXEJK70IkZKTB/PsMn16hMQ+NI3wCIQdzA=
+github.com/digitalocean/godo v1.152.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
+github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
-github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
+github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
+github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
-github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
-github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -184,180 +139,99 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
-github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
-github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
-github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
-github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
-github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
-github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
-github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
 github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
 github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
-github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
-github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
-github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
-github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
-github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0=
-github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8=
-github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do=
-github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
 github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
 github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
-github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
-github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
-github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
-github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
-github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
+github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
-github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
-github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
-github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
+github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
-github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
+github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
-github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
-github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
-github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
+github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
+github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
+github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
-github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
-github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
-github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
-github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
+github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
 github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
 github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
 github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@@ -366,7 +240,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
@@ -377,6 +250,8 @@ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
 github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I=
+github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
@@ -384,82 +259,66 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
 github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod
h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= -github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= -github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/memberlist v0.5.1 h1:mk5dRuzeDNis2bi6LLoQIXfMH7JQvAzt3mQD0vNZZUo= +github.com/hashicorp/memberlist v0.5.1/go.mod h1:zGDXV6AqbDTKTM6yxW0I4+JtFzZAJVoIPvss4hV8F24= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= -github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= 
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= -github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= -github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= -github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/hetznercloud/hcloud-go/v2 v2.21.1 h1:IH3liW8/cCRjfJ4cyqYvw3s1ek+KWP8dl1roa0lD8JM= +github.com/hetznercloud/hcloud-go/v2 v2.21.1/go.mod h1:XOaYycZJ3XKMVWzmqQ24/+1V7ormJHmPdck/kxrNnQA= +github.com/ionos-cloud/sdk-go/v6 v6.3.4 h1:jTvGl4LOF8v8OYoEIBNVwbFoqSGAFqn6vGE7sp7/BqQ= +github.com/ionos-cloud/sdk-go/v6 v6.3.4/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48= +github.com/jarcoal/httpmock v1.4.0 h1:BvhqnH0JAYbNudL2GMJKgOHe2CtKlzJ/5rWKyp+hc2k= +github.com/jarcoal/httpmock v1.4.0/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls 
v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE= +github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A= +github.com/knadh/koanf/v2 v2.2.0 h1:FZFwd9bUjpb8DyCWARUBy5ovuhDs1lI87dOEn2K8UVU= +github.com/knadh/koanf/v2 v2.2.0/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -470,11 +329,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= -github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/linode/linodego v1.52.1 h1:HJ1cz1n9n3chRP9UrtqmP91+xTi0Q5l+H/4z4tpkwgQ= +github.com/linode/linodego v1.52.1/go.mod h1:zEN2sX+cSdp67EuRY1HJiyuLujoa7HqvVwNEcJv3iXw= github.com/mailru/easyjson v0.7.7 
h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -485,7 +341,6 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -493,31 +348,35 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= -github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= +github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod 
h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -534,64 +393,46 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E= +github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk= github.com/oklog/ulid v1.3.1 
h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s= +github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 h1:hZa4FkI2JhYC0tkiwOepnHyyfWzezz3FfCmt88nWJa0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0/go.mod h1:sLbOuJEFckPdw4li0RtWpoSsMeppcck3s/cmzPyKAgc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0 h1:+rUULr4xqOJjZK3SokFmRYzsiPq5onoWoSv3He4aaus= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0/go.mod h1:Fh2SXPeFkr4J97w9CV/apFAib8TC9Hi0P08xtiT7Lng= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 h1:8OWwRSdIhm3DY3PEYJ0PtSEz1a1OjL0fghLXSr14JMk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0/go.mod h1:32OeaysZe4vkSmD1LJ18Q1DfooryYqpSzFNmz+5A5RU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 h1:9wVFaWEhgV8WQD+nP662nHNaQIkmyF57KRhtsqlaWEI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0/go.mod h1:Yak3vQIvwYQiAO83u+zD9ujdCmpcDL7JSfg2YK+Mwn4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= -github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/ovh/go-ovh v1.8.0 h1:eQ5TAAFZvZAVarQir62oaTL+8a503pIBuOWVn72iGtY= +github.com/ovh/go-ovh v1.8.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -599,87 +440,59 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod 
h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= -github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= +github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA= +github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSHCI8bjQusgCMSL+cE669w5/R2upq5WlPD0= 
+github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= -github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= -github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= -github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= +github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= +github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= +github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 h1:QlySqDdSESgWDePeAYskbbcKKdowI26m9aU9zloHyYE= +github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/sigv4 v0.2.0 h1:qDFKnHYFswJxdzGeRP63c4HlH3Vbn1Yf/Ao2zabtVXk= +github.com/prometheus/sigv4 v0.2.0/go.mod h1:D04rqmAaPPEUkjRQxGqjoxdyJuyCh6E0M18fZr0zBiE= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= -github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k= -github.com/simonpasquier/klog-gokit/v3 v3.3.0 h1:HMzH999kO5gEgJTaWWO+xjncW5oycspcsBnjn9b853Q= -github.com/simonpasquier/klog-gokit/v3 v3.3.0/go.mod h1:uSbnWC3T7kt1dQyY9sjv0Ao1SehMAJdVnUNSKhjaDsg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stackitcloud/stackit-sdk-go/core v0.17.2 h1:jPyn+i8rkp2hM80+hOg0B/1EVRbMt778Tr5RWyK1m2E= +github.com/stackitcloud/stackit-sdk-go/core v0.17.2/go.mod 
h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -689,463 +502,213 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io 
v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v1.34.0 h1:YONg7FaZ5zZbj5cLdARvwtMNuZHunuyxw2fWe5fcWqc= +go.opentelemetry.io/collector/component v1.34.0/go.mod h1:GvolsSVZskXuyfQdwYacqeBSZe/1tg4RJ0YK55KSvDA= +go.opentelemetry.io/collector/component/componentstatus v0.128.0 h1:0lEYHgUQEMMkl5FLtMgDH8lue4B3auElQINzGIWUya4= 
+go.opentelemetry.io/collector/component/componentstatus v0.128.0/go.mod h1:8vVO6JSV+edmiezJsQzW7aKQ7sFLIN6S3JawKBI646o= +go.opentelemetry.io/collector/component/componenttest v0.128.0 h1:MGNh5lQQ0Qmz2SmNwOqLJYaWMDkMLYj/51wjMzTBR34= +go.opentelemetry.io/collector/component/componenttest v0.128.0/go.mod h1:hALNxcacqOaX/Gm/dE7sNOxAEFj41SbRqtvF57Yd6gs= +go.opentelemetry.io/collector/confmap v1.34.0 h1:PG4sYlLxgCMnA5F7daKXZV+NKjU1IzXBzVQeyvcwyh0= +go.opentelemetry.io/collector/confmap v1.34.0/go.mod h1:BbAit8+hAJg5vyFBQoDh9vOXOH8UzCdNu91jCh+b72E= +go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 h1:hcVKU45pjC+PLz7xUc8kwSlR5wsN2w8hs9midZ3ez10= +go.opentelemetry.io/collector/confmap/xconfmap v0.128.0/go.mod h1:2928x4NAAu1CysfzLbEJE6MSSDB/gOYVq6YRGWY9LmM= +go.opentelemetry.io/collector/consumer v1.34.0 h1:oBhHH6mgViOGhVDPozE+sUdt7jFBo2Hh32lsSr2L3Tc= +go.opentelemetry.io/collector/consumer v1.34.0/go.mod h1:DVMCb56ZBlPNcmo0lSJKn3rp18oyZQCedRE4GKIMI+Q= +go.opentelemetry.io/collector/consumer/consumertest v0.128.0 h1:x50GB0I/QvU3sQuNCap5z/P2cnq2yHoRJ/8awkiT87w= +go.opentelemetry.io/collector/consumer/consumertest v0.128.0/go.mod h1:Wb3IAbMY/DOIwJPy81PuBiW2GnKoNIz4THE7wfJwovE= +go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 h1:4E+KTdCjkRS3SIw0bsv5kpv9XFXHf8x9YiPEuxBVEHY= +go.opentelemetry.io/collector/consumer/xconsumer v0.128.0/go.mod h1:OmzilL/qbjCzPMHay+WEA7/cPe5xuX7Jbj5WPIpqaMo= +go.opentelemetry.io/collector/featuregate v1.34.0 h1:zqDHpEYy1UeudrfUCvlcJL2t13dXywrC6lwpNZ5DrCU= +go.opentelemetry.io/collector/featuregate v1.34.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= +go.opentelemetry.io/collector/internal/telemetry v0.128.0 h1:ySEYWoY7J8DAYdlw2xlF0w+ODQi3AhYj7TRNflsCbx8= +go.opentelemetry.io/collector/internal/telemetry v0.128.0/go.mod h1:572B/iJqjauv3aT+zcwnlNWBPqM7+KqrYGSUuOAStrM= +go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8= +go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI= +go.opentelemetry.io/collector/pdata/pprofile v0.128.0 h1:6DEtzs/liqv/ukz2EHbC5OMaj2V6K2pzuj/LaRg2YmY= +go.opentelemetry.io/collector/pdata/pprofile v0.128.0/go.mod h1:bVVRpz+zKFf1UCCRUFqy8LvnO3tHlXKkdqW2d+Wi/iA= +go.opentelemetry.io/collector/pdata/testdata v0.128.0 h1:5xcsMtyzvb18AnS2skVtWreQP1nl6G3PiXaylKCZ6pA= +go.opentelemetry.io/collector/pdata/testdata v0.128.0/go.mod h1:9/VYVgzv3JMuIyo19KsT3FwkVyxbh3Eg5QlabQEUczA= +go.opentelemetry.io/collector/pipeline v0.128.0 h1:WgNXdFbyf/QRLy5XbO/jtPQosWrSWX/TEnSYpJq8bgI= +go.opentelemetry.io/collector/pipeline v0.128.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= +go.opentelemetry.io/collector/processor v1.34.0 h1:5pwXIG12XXxdkJ8F68e2cBEjEnFlCIAZhqEYM7vjkqE= +go.opentelemetry.io/collector/processor v1.34.0/go.mod h1:VCl4vYj2tdO4APUcr0q6Eh796mqCCsH9Z/gqaPuzlUs= +go.opentelemetry.io/collector/processor/processortest v0.128.0 h1:xPhOSmGFDGqhC3/nu1BqPSE6EpDPAf1/F+BfaYjDn/8= +go.opentelemetry.io/collector/processor/processortest v0.128.0/go.mod h1:XXXom+mbAQtrkcvq4Ecd6n8RQoVgcfLe1vrUlr6U2gI= +go.opentelemetry.io/collector/processor/xprocessor v0.128.0 h1:ObbtdXab0is6bdt4XabsRJZ+SUTuwQjPVlHTbmScfNg= +go.opentelemetry.io/collector/processor/xprocessor v0.128.0/go.mod h1:/nHXW15nzwSRQ+25Cb+r17he/uMtCEvSOBGqpDbn3Uk= +go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4= +go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= 
+go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw= +go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 h1:lREC4C0ilyP4WibDhQ7Gg2ygAQFP8oR07Fst/5cafwI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0/go.mod h1:HfvuU0kW9HewH14VCOLImqKvUgONodURG7Alj/IrnGI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= +go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= +go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= +go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989 h1:4JF7oY9CcHrPGfBLijDcXZyCzGckVEyOjuat5ktmQRg= +go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989/go.mod h1:NToOxLDCS1tXDSB2dIj44H9xGPOpKr0csIN+gnuihv4= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= +go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a 
h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod 
h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 
h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= 
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= -google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf 
v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/api v0.238.0 h1:+EldkglWIg/pWjkq97sd+XxH7PxakNYoe/rkSTbnvOs= +google.golang.org/api v0.238.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod 
h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1154,32 +717,23 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/tools/go.mod b/internal/tools/go.mod new file mode 100644 index 0000000000..046a272340 --- /dev/null +++ b/internal/tools/go.mod @@ -0,0 +1,115 @@ +module github.com/prometheus/prometheus/internal/tools + +go 1.23.0 + +require ( + github.com/bufbuild/buf v1.51.0 + github.com/daixiang0/gci v0.13.6 + github.com/gogo/protobuf v1.3.2 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 +) + +require ( + buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.5-20250121211742-6d880cc6cc8d.1 // indirect + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.5-20250307204501-0409229c3780.1 // indirect + buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1 // indirect + buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.5-20250116203702-1c024d64352b.1 // indirect + buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1 // indirect + buf.build/go/bufplugin v0.8.0 // indirect + buf.build/go/protoyaml v0.3.1 // indirect + buf.build/go/spdx v0.2.0 // indirect + cel.dev/expr v0.21.2 // indirect + connectrpc.com/connect v1.18.1 // indirect + 
connectrpc.com/otelconnect v0.7.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect + github.com/bufbuild/protocompile v0.14.1 // indirect + github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9 // indirect + github.com/bufbuild/protovalidate-go v0.9.3-0.20250317160558-38a17488914d // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v27.5.1+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v28.0.2+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/fgprof v0.9.5 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-chi/chi/v5 v5.2.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gofrs/flock v0.12.1 // indirect + github.com/google/cel-go v0.24.1 // indirect + github.com/google/go-containerregistry v0.20.3 // indirect + github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jdx/go-netrc v1.0.0 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/mount v0.3.4 // indirect + github.com/moby/sys/mountinfo v0.7.2 // indirect + github.com/moby/sys/reexec v0.1.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/onsi/ginkgo/v2 v2.22.2 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/profile v1.7.0 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.50.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/segmentio/asm v1.2.0 // indirect + github.com/segmentio/encoding v0.4.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/tetratelabs/wazero v1.9.0 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + go.lsp.dev/jsonrpc2 v0.10.0 // indirect + go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 // indirect + go.lsp.dev/protocol v0.12.0 // indirect + go.lsp.dev/uri v0.3.0 // indirect + 
go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.uber.org/mock v0.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.uber.org/zap/exp v0.3.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/tools v0.31.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.70.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + pluginrpc.com/pluginrpc v0.5.0 // indirect +) diff --git a/internal/tools/go.sum b/internal/tools/go.sum new file mode 100644 index 0000000000..54a200b5a6 --- /dev/null +++ b/internal/tools/go.sum @@ -0,0 +1,328 @@ +buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.5-20250121211742-6d880cc6cc8d.1 h1:z/NYWpgoeKkKL3+LYF+8QK58Rjz3qkMAshpdzJTaJ7o= +buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.5-20250121211742-6d880cc6cc8d.1/go.mod h1:LpnZWZGTs6IBCnY9WHAkR9X4/NbpL5nwOXivQdXILTs= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.5-20250307204501-0409229c3780.1 h1:j+l4+E1EEo83GVIxuqinfFOTyImSQUH90WfufE86xaI= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.5-20250307204501-0409229c3780.1/go.mod h1:eOqrCVUfhh7SLo00urDe/XhJHljj0dWMZirS0aX7cmc= +buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1 h1:1SDs5tEGoWWv2vmKLx2B0Bp+yfhlxiU4DaZUII8+Pvs= +buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250116203702-1c024d64352b.1/go.mod h1:o2AgVM1j3MczvxnMqfZTpiqGwK1VD4JbEagseY0QcjE= +buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.5-20250116203702-1c024d64352b.1 h1:MTNYELBBDEj2ddEwWb/vuAm5PRLyWtZe7CLnc6WZ5qQ= +buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.5-20250116203702-1c024d64352b.1/go.mod h1:E8bt4dG1/NfuocvVmlDNWIfKoLK0B4AgGq4ubwEGBvo= +buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1 h1:XmYgi9W/9oST2ZrfT3ucGWkzD9+Vd0ls9yhyZ8ae0KQ= +buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.4-20241007202033-cf42259fcbfc.1/go.mod h1:cxFpqWIC80Wm8YNo1038ocBmrF84uQ0IfL0uVdAu9ZY= +buf.build/go/bufplugin v0.8.0 h1:YgR1+CNGmzR69jt85oRWTa5FioZoX/tOrHV+JxfNnnk= +buf.build/go/bufplugin v0.8.0/go.mod h1:rcm0Esd3P/GM2rtYTvz3+9Gf8w9zdo7rG8dKSxYHHIE= +buf.build/go/protoyaml v0.3.1 h1:ucyzE7DRnjX+mQ6AH4JzN0Kg50ByHHu+yrSKbgQn2D4= +buf.build/go/protoyaml v0.3.1/go.mod h1:0TzNpFQDXhwbkXb/ajLvxIijqbve+vMQvWY/b3/Dzxg= +buf.build/go/spdx v0.2.0 h1:IItqM0/cMxvFJJumcBuP8NrsIzMs/UYjp/6WSpq8LTw= +buf.build/go/spdx v0.2.0/go.mod h1:bXdwQFem9Si3nsbNy8aJKGPoaPi5DKwdeEp5/ArZ6w8= +cel.dev/expr v0.21.2 h1:o+Wj235dy4gFYlYin3JsMpp3EEfMrPm/6tdoyjT98S0= +cel.dev/expr v0.21.2/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw= +connectrpc.com/connect 
v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8= +connectrpc.com/otelconnect v0.7.2 h1:WlnwFzaW64dN06JXU+hREPUGeEzpz3Acz2ACOmN8cMI= +connectrpc.com/otelconnect v0.7.2/go.mod h1:JS7XUKfuJs2adhCnXhNHPHLz6oAaZniCJdSF00OZSew= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= +github.com/bufbuild/buf v1.51.0 h1:k2we7gmuSDeIqxkv16F/8s5Kk0l2ZfvMHpvC1n6o5Rk= +github.com/bufbuild/buf v1.51.0/go.mod h1:TbX4Df3BfE0Lugd3Y3sFr7QTxqmCfPkuiEexe29KZeE= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9 h1:kAWER21DzhzU7ys8LL1WkSfbGkwXv+tM30hyEsYrW2k= +github.com/bufbuild/protoplugin v0.0.0-20250106231243-3a819552c9d9/go.mod h1:c5D8gWRIZ2HLWO3gXYTtUfw/hbJyD8xikv2ooPxnklQ= +github.com/bufbuild/protovalidate-go v0.9.3-0.20250317160558-38a17488914d h1:Y6Yp/LwSaRG8gw9GyyQD7jensL9NXqPlkbuulaAvCEE= +github.com/bufbuild/protovalidate-go v0.9.3-0.20250317160558-38a17488914d/go.mod h1:SZN6Qr3lPWuKMoQtIhKdhESkb+3m2vk0lqN9WMuZDDU= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod 
h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/daixiang0/gci v0.13.6 h1:RKuEOSkGpSadkGbvZ6hJ4ddItT3cVZ9Vn9Rybk6xjl8= +github.com/daixiang0/gci v0.13.6/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= +github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8= +github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8= +github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod 
h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/cel-go v0.24.1 h1:jsBCtxG8mM5wiUJDSGUqU0K7Mtr3w7Eyv00rw4DiZxI= +github.com/google/cel-go v0.24.1/go.mod h1:Hdf9TqOaTNSFQA1ybQaRqATVoK7m/zcf7IMhGXP5zI8= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 h1:+J3r2e8+RsmN3vKfo75g0YSY61ms37qzPglu4p0sGro= +github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jdx/go-netrc v1.0.0 h1:QbLMLyCZGj0NA8glAhxUpf1zDg6cxnWgMBbjq40W0gQ= +github.com/jdx/go-netrc v1.0.0/go.mod h1:Gh9eFQJnoTNIRHXl2j5bJXA1u84hQWJWgGh569zF3v8= +github.com/jhump/protoreflect/v2 v2.0.0-beta.2 h1:qZU+rEZUOYTz1Bnhi3xbwn+VxdXkLVeEpAeZzVXLY88= +github.com/jhump/protoreflect/v2 v2.0.0-beta.2/go.mod h1:4tnOYkB/mq7QTyS3YKtVtNrJv4Psqout8HA1U+hZtgM= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/mount v0.3.4 h1:yn5jq4STPztkkzSKpZkLcmjue+bZJ0u2AuQY1iNI1Ww= +github.com/moby/sys/mount v0.3.4/go.mod h1:KcQJMbQdJHPlq5lcYT+/CjatWM4PuxKe+XLSVS4J6Os= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= +github.com/moby/sys/reexec v0.1.0 h1:RrBi8e0EBTLEgfruBOFcxtElzRGTEUkeIFaVXgU7wok= +github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 
h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q= +github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/segmentio/encoding v0.4.1 h1:KLGaLSW0jrmhB58Nn4+98spfvPvmo4Ci1P/WIQ9wn7w= +github.com/segmentio/encoding v0.4.1/go.mod h1:/d03Cd8PoaDeceuhUUUQWjU0KhWjrmYrWPgtJHYZSnI= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= +github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= +go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= +go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 h1:hCzQgh6UcwbKgNSRurYWSqh8MufqRRPODRBblutn4TE= +go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2/go.mod h1:gtSHRuYfbCT0qnbLnovpie/WEmqyJ7T4n6VXiFMBtcw= +go.lsp.dev/protocol v0.12.0 h1:tNprUI9klQW5FAFVM4Sa+AbPFuVQByWhP1ttNUAjIWg= +go.lsp.dev/protocol v0.12.0/go.mod h1:Qb11/HgZQ72qQbeyPfJbu3hZBH23s1sr4st8czGeDMQ= +go.lsp.dev/uri v0.3.0 h1:KcZJmh6nFIBeJzTugn5JTU6OOyG0lDOo3R9KwTxTYbo= +go.lsp.dev/uri v0.3.0/go.mod h1:P5sbO1IQR+qySTWOCnhnK7phBx+W3zbLqSMDJNTw88I= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= 
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= +go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c= +golang.org/x/exp v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3 
h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +pluginrpc.com/pluginrpc v0.5.0 h1:tOQj2D35hOmvHyPu8e7ohW2/QvAnEtKscy2IJYWQ2yo= +pluginrpc.com/pluginrpc v0.5.0/go.mod h1:UNWZ941hcVAoOZUn8YZsMmOZBzbUjQa3XMns8RQLp9o= diff --git a/scripts/tools.go b/internal/tools/tools.go similarity index 79% rename from scripts/tools.go rename to internal/tools/tools.go index e1dd80232f..e57e37186f 100644 --- a/scripts/tools.go +++ b/internal/tools/tools.go @@ -18,8 +18,9 @@ package tools import ( + _ "github.com/bufbuild/buf/cmd/buf" + _ "github.com/daixiang0/gci/cmd/gci" _ "github.com/gogo/protobuf/protoc-gen-gogofast" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger" - _ "golang.org/x/tools/cmd/goimports" + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway" + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2" ) diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go index 08f55374ef..d03940f1b2 100644 --- a/model/exemplar/exemplar.go +++ b/model/exemplar/exemplar.go @@ -15,8 +15,10 @@ package exemplar import "github.com/prometheus/prometheus/model/labels" -// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters -// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars +// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of +// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 +// UTF-8 characters." +// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars const ExemplarMaxLabelSetLength = 128 // Exemplar is additional information associated with a time series. @@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool { return e.Value == e2.Value } -// Sort first by timestamp, then value, then labels. +// Compare first timestamps, then values, then labels. func Compare(a, b Exemplar) int { if a.Ts < b.Ts { return -1 diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2a37ea66d4..92f084bdf6 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -14,6 +14,7 @@ package histogram import ( + "errors" "fmt" "math" "strings" @@ -72,10 +73,8 @@ func (h *FloatHistogram) Copy() *FloatHistogram { } if h.UsesCustomBuckets() { - if len(h.CustomValues) != 0 { - c.CustomValues = make([]float64, len(h.CustomValues)) - copy(c.CustomValues, h.CustomValues) - } + // Custom values are interned, so no need to copy them. + c.CustomValues = h.CustomValues } else { c.ZeroThreshold = h.ZeroThreshold c.ZeroCount = h.ZeroCount @@ -116,9 +115,8 @@ func (h *FloatHistogram) CopyTo(to *FloatHistogram) { to.NegativeSpans = clearIfNotNil(to.NegativeSpans) to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) - - to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) - copy(to.CustomValues, h.CustomValues) + // Custom values are interned, so no need to copy them. 
+ to.CustomValues = h.CustomValues } else { to.ZeroThreshold = h.ZeroThreshold to.ZeroCount = h.ZeroCount @@ -129,7 +127,8 @@ func (h *FloatHistogram) CopyTo(to *FloatHistogram) { to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) copy(to.NegativeBuckets, h.NegativeBuckets) - to.CustomValues = clearIfNotNil(to.CustomValues) + // Custom values are interned, so no need to reset them. + to.CustomValues = nil } to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) @@ -230,6 +229,17 @@ func (h *FloatHistogram) TestExpression() string { res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) } + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. + case CounterReset: + res = append(res, "counter_reset_hint:reset") + case NotCounterReset: + res = append(res, "counter_reset_hint:not_reset") + case GaugeType: + res = append(res, "counter_reset_hint:gauge") + } + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) @@ -293,6 +303,14 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar h.Count /= scalar h.Sum /= scalar + // Division by zero removes all buckets. + if scalar == 0 { + h.PositiveBuckets = nil + h.NegativeBuckets = nil + h.PositiveSpans = nil + h.NegativeSpans = nil + return h + } for i := range h.PositiveBuckets { h.PositiveBuckets[i] /= scalar } @@ -342,7 +360,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { default: // All other cases shouldn't actually happen. // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. h.CounterResetHint = UnknownCounterReset // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } @@ -658,7 +676,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the - // current one does not have. Unlass all + // current one does not have. Unless all // remaining buckets in the previous histogram // are unpopulated, this is a reset. 
for { @@ -765,16 +783,16 @@ func (h *FloatHistogram) Validate() error { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { - return fmt.Errorf("custom buckets: must have zero count of 0") + return errors.New("custom buckets: must have zero count of 0") } if h.ZeroThreshold != 0 { - return fmt.Errorf("custom buckets: must have zero threshold of 0") + return errors.New("custom buckets: must have zero threshold of 0") } if len(h.NegativeSpans) > 0 { - return fmt.Errorf("custom buckets: must not have negative spans") + return errors.New("custom buckets: must not have negative spans") } if len(h.NegativeBuckets) > 0 { - return fmt.Errorf("custom buckets: must not have negative buckets") + return errors.New("custom buckets: must not have negative buckets") } } else { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { @@ -788,7 +806,7 @@ func (h *FloatHistogram) Validate() error { return fmt.Errorf("negative side: %w", err) } if h.CustomValues != nil { - return fmt.Errorf("histogram with exponential schema must not have custom bounds") + return errors.New("histogram with exponential schema must not have custom bounds") } } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) @@ -891,7 +909,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() { // reconcileZeroBuckets finds a zero bucket large enough to include the zero // buckets of both histograms (the receiving histogram and the other histogram) // with a zero threshold that is not within a populated bucket in either -// histogram. This method modifies the receiving histogram accourdingly, but +// histogram. This method modifies the receiving histogram accordingly, but // leaves the other histogram as is. Instead, it returns the zero count the // other histogram would have if it were modified. func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { @@ -929,10 +947,10 @@ func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, ) floatBucketIterator { if h.UsesCustomBuckets() && targetSchema != h.Schema { - panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema")) + panic(errors.New("cannot merge from custom buckets schema to exponential schema")) } if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) { - panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema")) + panic(errors.New("cannot merge from exponential buckets schema to custom schema")) } if targetSchema > h.Schema { panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) @@ -996,7 +1014,7 @@ type floatBucketIterator struct { func (i *floatBucketIterator) At() Bucket[float64] { // Need to use i.targetSchema rather than i.baseBucketIterator.schema. 
- return i.baseBucketIterator.at(i.targetSchema) + return i.at(i.targetSchema) } func (i *floatBucketIterator) Next() bool { diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 1558a6d679..34988e9d39 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -131,6 +131,54 @@ func TestFloatHistogramMul(t *testing.T) { NegativeBuckets: []float64{9, 3, 15, 18}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -11, + Count: -30, + Sum: -23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-1, 0, -3, -4, -7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3, -1, -5, -6}, + }, + }, + { + "negative multiplier", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -22, + Count: -60, + Sum: -46, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-2, 0, -6, -8, -14}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-6, -2, -10, -12}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ @@ -351,14 +399,10 @@ func TestFloatHistogramDiv(t *testing.T) { }, 0, &FloatHistogram{ - ZeroThreshold: 0.01, - ZeroCount: math.Inf(1), - Count: math.Inf(1), - Sum: math.Inf(1), - PositiveSpans: []Span{{-2, 1}, {2, 3}}, - PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, - NegativeSpans: []Span{{3, 2}, {3, 2}}, - NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, + ZeroThreshold: 0.01, + Count: math.Inf(1), + Sum: math.Inf(1), + ZeroCount: math.Inf(1), }, }, { @@ -409,6 +453,54 @@ func TestFloatHistogramDiv(t *testing.T) { NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -3493.3, + Sum: -2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{-1, -3.3, -4.2, -0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3.1, -3, -1.234e5, -1000}, + }, + }, + { + "negative half", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -15, + Sum: -11.5, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-0.5, 0, -1.5, -2, -3.5}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-1.5, -0.5, -2.5, -3}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ diff --git a/model/histogram/histogram.go 
b/model/histogram/histogram.go index e4b99ec420..cfb63e6341 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -14,6 +14,7 @@ package histogram import ( + "errors" "fmt" "math" "slices" @@ -101,10 +102,8 @@ func (h *Histogram) Copy() *Histogram { } if h.UsesCustomBuckets() { - if len(h.CustomValues) != 0 { - c.CustomValues = make([]float64, len(h.CustomValues)) - copy(c.CustomValues, h.CustomValues) - } + // Custom values are interned, it's ok to copy by reference. + c.CustomValues = h.CustomValues } else { c.ZeroThreshold = h.ZeroThreshold c.ZeroCount = h.ZeroCount @@ -145,9 +144,8 @@ func (h *Histogram) CopyTo(to *Histogram) { to.NegativeSpans = clearIfNotNil(to.NegativeSpans) to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) - - to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) - copy(to.CustomValues, h.CustomValues) + // Custom values are interned, it's ok to copy by reference. + to.CustomValues = h.CustomValues } else { to.ZeroThreshold = h.ZeroThreshold to.ZeroCount = h.ZeroCount @@ -157,8 +155,8 @@ func (h *Histogram) CopyTo(to *Histogram) { to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) copy(to.NegativeBuckets, h.NegativeBuckets) - - to.CustomValues = clearIfNotNil(to.CustomValues) + // Custom values are interned, no need to reset. + to.CustomValues = nil } to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) @@ -378,9 +376,8 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { fh.ZeroCount = 0 fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans) fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets) - - fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues)) - copy(fh.CustomValues, h.CustomValues) + // Custom values are interned, it's ok to copy by reference. + fh.CustomValues = h.CustomValues } else { fh.ZeroThreshold = h.ZeroThreshold fh.ZeroCount = float64(h.ZeroCount) @@ -394,7 +391,8 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { currentNegative += float64(b) fh.NegativeBuckets[i] = currentNegative } - fh.CustomValues = clearIfNotNil(fh.CustomValues) + // Custom values are interned, no need to reset. 
+ fh.CustomValues = nil } fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) @@ -432,16 +430,16 @@ func (h *Histogram) Validate() error { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { - return fmt.Errorf("custom buckets: must have zero count of 0") + return errors.New("custom buckets: must have zero count of 0") } if h.ZeroThreshold != 0 { - return fmt.Errorf("custom buckets: must have zero threshold of 0") + return errors.New("custom buckets: must have zero threshold of 0") } if len(h.NegativeSpans) > 0 { - return fmt.Errorf("custom buckets: must not have negative spans") + return errors.New("custom buckets: must not have negative spans") } if len(h.NegativeBuckets) > 0 { - return fmt.Errorf("custom buckets: must not have negative buckets") + return errors.New("custom buckets: must not have negative buckets") } } else { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { @@ -455,7 +453,7 @@ func (h *Histogram) Validate() error { return fmt.Errorf("negative side: %w", err) } if h.CustomValues != nil { - return fmt.Errorf("histogram with exponential schema must not have custom bounds") + return errors.New("histogram with exponential schema must not have custom bounds") } } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) diff --git a/model/histogram/test_utils.go b/model/histogram/test_utils.go index 9e9a711c29..e6b33863bd 100644 --- a/model/histogram/test_utils.go +++ b/model/histogram/test_utils.go @@ -19,12 +19,12 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { bucketsPerSide := numBuckets / 2 spanLength := uint32(bucketsPerSide / numSpans) // Given all bucket deltas are 1, sum bucketsPerSide + 1. - observationCount := bucketsPerSide * (1 + bucketsPerSide) + observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide)) var histograms []*Histogram for i := 0; i < numHistograms; i++ { h := &Histogram{ - Count: uint64(i + observationCount), + Count: uint64(i) + observationCount, ZeroCount: uint64(i), ZeroThreshold: 1e-128, Sum: 18.4 * float64(i+1), diff --git a/model/labels/labels.go b/model/labels/labels.go index 01514abf38..2e8dc52cb3 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !stringlabels && !dedupelabels +//go:build slicelabels package labels @@ -19,6 +19,7 @@ import ( "bytes" "slices" "strings" + "unsafe" "github.com/cespare/xxhash/v2" ) @@ -31,17 +32,17 @@ func (ls Labels) Len() int { return len(ls) } func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } -// Bytes returns ls as a byte slice. -// It uses an byte invalid character as a separator and so should not be used for printing. +// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key. +// Encoding may change over time or between runs of Prometheus. func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) b.WriteByte(labelSep) for i, l := range ls { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(l.Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(l.Value) } return b.Bytes() @@ -86,9 +87,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) 
- b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } @@ -106,9 +107,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { i++ default: b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) i++ j++ } @@ -130,9 +131,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -151,10 +152,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { i++ default: if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) i++ j++ @@ -177,10 +178,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { continue } if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) } return b.Bytes() @@ -249,15 +250,7 @@ func (ls Labels) WithoutEmpty() Labels { // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { - if len(ls) != len(o) { - return false - } - for i, l := range ls { - if l != o[i] { - return false - } - } - return true + return slices.Equal(ls, o) } // EmptyLabels returns an empty Labels value, for convenience. @@ -315,7 +308,8 @@ func Compare(a, b Labels) int { return len(a) - len(b) } -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +// CopyFrom copies labels from b on top of whatever was in ls previously, +// reusing memory or expanding if needed. func (ls *Labels) CopyFrom(b Labels) { (*ls) = append((*ls)[:0], b...) } @@ -342,16 +336,29 @@ func (ls Labels) Validate(f func(l Label) error) error { return nil } -// DropMetricName returns Labels with "__name__" removed. +// DropMetricName returns Labels with the "__name__" removed. +// Deprecated: Use DropReserved instead. func (ls Labels) DropMetricName() Labels { + return ls.DropReserved(func(n string) bool { return n == MetricName }) +} + +// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels. +func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels { + rm := 0 for i, l := range ls { - if l.Name == MetricName { + if l.Name[0] > '_' { // Stop looking if we've gone past special labels. + break + } + if shouldDropFn(l.Name) { + i := i - rm // Offsetting after removals. if i == 0 { // Make common case fast with no allocations. - return ls[1:] + ls = ls[1:] + } else { + // Avoid modifying original Labels - use [:i:i] so that left slice would not + // have any spare capacity and append would have to allocate a new slice for the result. + ls = append(ls[:i:i], ls[i+1:]...) } - // Avoid modifying original Labels - use [:i:i] so that left slice would not - // have any spare capacity and append would have to allocate a new slice for the result. - return append(ls[:i:i], ls[i+1:]...) + rm++ } } return ls @@ -422,7 +429,7 @@ type ScratchBuilder struct { add Labels } -// Symbol-table is no-op, just for api parity with dedupelabels. +// SymbolTable is no-op, just for api parity with dedupelabels. 
type SymbolTable struct{} func NewSymbolTable() *SymbolTable { return nil } @@ -458,8 +465,8 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } -// Add a name/value pair, using []byte instead of string. -// The '-tags stringlabels' version of this function is unsafe, hence the name. +// UnsafeAddBytes adds a name/value pair, using []byte instead of string. +// The default version of this function is unsafe, hence the name. // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { b.add = append(b.add, Label{Name: string(name), Value: string(value)}) @@ -475,15 +482,20 @@ func (b *ScratchBuilder) Assign(ls Labels) { b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. } -// Return the name/value pairs added so far as a Labels object. +// Labels returns the name/value pairs added so far as a Labels object. // Note: if you want them sorted, call Sort() first. func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } -// Write the newly-built Labels out to ls. +// Overwrite writes the newly-built Labels out to ls. // Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { *ls = append((*ls)[:0], b.add...) } + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return (uint64(len(name)) + uint64(unsafe.Sizeof(name)) + uint64(len(value)) + uint64(unsafe.Sizeof(value))) * n +} diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index 4bc94f84fe..5f46d6c35f 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -24,17 +24,20 @@ import ( ) const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" + // MetricName is a special label name that represents a metric name. + // Deprecated: Use schema.Metadata structure and its methods. + MetricName = "__name__" - labelSep = '\xfe' + AlertName = "alertname" + BucketLabel = "le" + + labelSep = '\xfe' // Used at beginning of `Bytes` return. + sep = '\xff' // Used between labels in `Bytes` and `Hash`. ) -var seps = []byte{'\xff'} +var seps = []byte{sep} // Used with Hash, which has no WriteByte method. // Label is a key/value pair of strings. type Label struct { Name, Value string } @@ -50,7 +53,11 @@ func (ls Labels) String() string { b.WriteByte(',') b.WriteByte(' ') } - b.WriteString(l.Name) + if !model.LabelName(l.Name).IsValidLegacy() { + b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Name)) + } else { + b.WriteString(l.Name) + } b.WriteByte('=') b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value)) i++ @@ -94,12 +101,23 @@ func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { } // IsValid checks if the metric name or label names are valid. 
-func (ls Labels) IsValid() bool { +func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool { err := ls.Validate(func(l Label) error { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return strconv.ErrSyntax + if l.Name == model.MetricNameLabel { + // If the default validation scheme has been overridden with legacy mode, + // we need to call the special legacy validation checker. + if validationScheme == model.LegacyValidation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) { + return strconv.ErrSyntax + } + if !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + if validationScheme == model.LegacyValidation { + if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + } else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { return strconv.ErrSyntax } return nil @@ -151,10 +169,8 @@ func (b *Builder) Del(ns ...string) *Builder { // Keep removes all labels from the base except those with the given names. func (b *Builder) Keep(ns ...string) *Builder { b.base.Range(func(l Label) { - for _, n := range ns { - if l.Name == n { - return - } + if slices.Contains(ns, l.Name) { + return } b.del = append(b.del, l.Name) }) @@ -218,5 +234,5 @@ func contains(s []Label, n string) bool { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index 0e5bb048be..64e0a69b83 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -140,19 +140,19 @@ func decodeString(t *nameTable, data string, index int) (string, int) { return t.ToName(num), index } -// Bytes returns ls as a byte slice. -// It uses non-printing characters and so should not be used for printing. +// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key. +// Encoding may change over time or between runs of Prometheus. func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) for i := 0; i < len(ls.data); { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } var name, value string name, i = decodeString(ls.syms, ls.data, i) value, i = decodeString(ls.syms, ls.data, i) b.WriteString(name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(value) } return b.Bytes() @@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) pos = newPos } return xxhash.Sum64(b) @@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) 
- b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { } if lName == names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos @@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { } if j == len(names) || lName != names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos @@ -554,20 +554,27 @@ func (ls Labels) ReleaseStrings(release func(string)) { // TODO: remove these calls as there is nothing to do. } -// DropMetricName returns Labels with "__name__" removed. +// DropMetricName returns Labels with the "__name__" removed. +// Deprecated: Use DropReserved instead. func (ls Labels) DropMetricName() Labels { + return ls.DropReserved(func(n string) bool { return n == MetricName }) +} + +// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels. +func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels { for i := 0; i < len(ls.data); { lName, i2 := decodeString(ls.syms, ls.data, i) _, i2 = decodeVarint(ls.data, i2) - if lName == MetricName { + if lName[0] > '_' { // Stop looking if we've gone past special labels. + break + } + if shouldDropFn(lName) { if i == 0 { // Make common case fast with no allocations. ls.data = ls.data[i2:] } else { ls.data = ls.data[:i] + ls.data[i2:] } - break - } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. - break + continue } i = i2 } @@ -815,3 +822,8 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) { ls.syms = b.syms.nameTable ls.data = yoloString(b.overwriteBuffer) } + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return uint64(len(name)+len(value)) + n*4 // Assuming most symbol-table entries are 2 bytes long. +} diff --git a/model/labels/labels_dedupelabels_test.go b/model/labels/labels_dedupelabels_test.go index 5ef9255c21..baf0423db2 100644 --- a/model/labels/labels_dedupelabels_test.go +++ b/model/labels/labels_dedupelabels_test.go @@ -21,6 +21,15 @@ import ( "github.com/stretchr/testify/require" ) +var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels. + 16, + 0, + 41, + 270, + 271, + 325, +} + func TestVarint(t *testing.T) { cases := []struct { v int diff --git a/model/labels/labels_slicelabels_test.go b/model/labels/labels_slicelabels_test.go new file mode 100644 index 0000000000..e9edc9152d --- /dev/null +++ b/model/labels/labels_slicelabels_test.go @@ -0,0 +1,25 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
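Not part of the patch — a minimal usage sketch of the DropReserved API that the hunks above add to all three Labels implementations, replacing the now-deprecated DropMetricName. The "__type__" label name is hypothetical, chosen only to show a predicate that drops more than one reserved name in a single pass:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	ls := labels.FromStrings(
		"__name__", "http_requests_total",
		"__type__", "counter", // hypothetical reserved label, for illustration only
		"job", "api",
	)

	// Equivalent to the deprecated ls.DropMetricName(), plus one more
	// reserved name; iteration stops once past the underscore-prefixed labels.
	trimmed := ls.DropReserved(func(name string) bool {
		return name == labels.MetricName || name == "__type__"
	})
	fmt.Println(trimmed) // {job="api"}
}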
+ +//go:build slicelabels + +package labels + +var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels. + 72, + 0, + 97, + 326, + 327, + 549, +} diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index bccceb61fe..a2b16cac76 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -11,12 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build stringlabels +//go:build !slicelabels && !dedupelabels package labels import ( - "reflect" "slices" "strings" "unsafe" @@ -25,31 +24,25 @@ import ( ) // Labels is implemented by a single flat string holding name/value pairs. -// Each name and value is preceded by its length in varint encoding. +// Each name and value is preceded by its length, encoded as a single byte +// for size 0-254, or the following 3 bytes little-endian, if the first byte is 255. +// Maximum length allowed is 2^24 or 16MB. // Names are in order. type Labels struct { data string } func decodeSize(data string, index int) (int, int) { - // Fast-path for common case of a single byte, value 0..127. b := data[index] index++ - if b < 0x80 { - return int(b), index - } - size := int(b & 0x7F) - for shift := uint(7); ; shift += 7 { + if b == 255 { + // Larger numbers are encoded as 3 bytes little-endian. // Just panic if we go off the end of data, since all Labels strings are constructed internally and // malformed data indicates a bug, or memory corruption. - b := data[index] - index++ - size |= int(b&0x7F) << shift - if b < 0x80 { - break - } + return int(data[index]) + (int(data[index+1]) << 8) + (int(data[index+2]) << 16), index + 3 } - return size, index + // More common case of a single byte, value 0..254. + return int(b), index } func decodeString(data string, index int) (string, int) { @@ -58,8 +51,8 @@ func decodeString(data string, index int) (string, int) { return data[index : index+size], index + size } -// Bytes returns ls as a byte slice. -// It uses non-printing characters and so should not be used for printing. +// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key. +// Encoding may change over time or between runs of Prometheus. func (ls Labels) Bytes(buf []byte) []byte { if cap(buf) < len(ls.data) { buf = make([]byte, len(ls.data)) @@ -77,7 +70,7 @@ func (ls Labels) IsZero() bool { // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. -// TODO: This is only used in printing an error message +// TODO: This is only used in printing an error message. func (ls Labels) MatchLabels(on bool, names ...string) Labels { b := NewBuilder(ls) if on { @@ -112,9 +105,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -138,9 +131,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) 
- b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -299,10 +292,9 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return + +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. @@ -338,8 +330,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break @@ -373,7 +365,7 @@ func Compare(a, b Labels) int { return +1 } -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +// CopyFrom will copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. func (ls *Labels) CopyFrom(b Labels) { ls.data = b.data // strings are immutable } @@ -421,21 +413,28 @@ func (ls Labels) Validate(f func(l Label) error) error { return nil } -// DropMetricName returns Labels with "__name__" removed. +// DropMetricName returns Labels with the "__name__" removed. +// Deprecated: Use DropReserved instead. func (ls Labels) DropMetricName() Labels { + return ls.DropReserved(func(n string) bool { return n == MetricName }) +} + +// DropReserved returns Labels without the chosen (via shouldDropFn) reserved (starting with underscore) labels. +func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels { for i := 0; i < len(ls.data); { lName, i2 := decodeString(ls.data, i) size, i2 := decodeSize(ls.data, i2) i2 += size - if lName == MetricName { + if lName[0] > '_' { // Stop looking if we've gone past special labels. + break + } + if shouldDropFn(lName) { if i == 0 { // Make common case fast with no allocations. ls.data = ls.data[i2:] } else { ls.data = ls.data[:i] + ls.data[i2:] } - break - } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. - break + continue } i = i2 } @@ -443,11 +442,11 @@ func (ls Labels) DropMetricName() Labels { } // InternStrings is a no-op because it would only save when the whole set of labels is identical. -func (ls *Labels) InternStrings(intern func(string) string) { +func (ls *Labels) InternStrings(_ func(string) string) { } // ReleaseStrings is a no-op for the same reason as InternStrings. -func (ls Labels) ReleaseStrings(release func(string)) { +func (ls Labels) ReleaseStrings(_ func(string)) { } // Builder allows modifying Labels. 
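Not part of the patch — a self-contained sketch of the 1-or-4-byte length prefix that replaces varint encoding in labels_stringlabels.go. The helpers appendSize and readSize are hypothetical stand-ins mirroring the patch's encodeSize/decodeSize scheme, except they write forward instead of backwards into a pre-sized buffer:

package main

import "fmt"

// appendSize appends the length prefix for v: one byte for sizes 0..254,
// otherwise the marker byte 255 followed by v as 3 bytes little-endian
// (so the largest encodable size is just under 2^24, i.e. 16MB).
func appendSize(buf []byte, v int) []byte {
	if v < 255 {
		return append(buf, byte(v))
	}
	return append(buf, 255, byte(v), byte(v>>8), byte(v>>16))
}

// readSize mirrors decodeSize above: it returns the decoded size and the
// index of the first byte after the prefix.
func readSize(data []byte, index int) (int, int) {
	b := data[index]
	index++
	if b == 255 {
		return int(data[index]) | int(data[index+1])<<8 | int(data[index+2])<<16, index + 3
	}
	return int(b), index
}

func main() {
	for _, size := range []int{0, 7, 254, 255, 300000} {
		buf := appendSize(nil, size)
		got, next := readSize(buf, 0)
		fmt.Printf("size=%d -> %d-byte prefix, decodes to %d (next index %d)\n",
			size, len(buf), got, next)
	}
}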
@@ -530,48 +529,27 @@ func marshalLabelToSizedBuffer(m *Label, data []byte) int { return len(data) - i } -func sizeVarint(x uint64) (n int) { - // Most common case first - if x < 1<<7 { +func sizeWhenEncoded(x uint64) (n int) { + if x < 255 { return 1 + } else if x <= 1<<24 { + return 4 } - if x >= 1<<56 { - return 9 - } - if x >= 1<<28 { - x >>= 28 - n = 4 - } - if x >= 1<<14 { - x >>= 14 - n += 2 - } - if x >= 1<<7 { - n++ - } - return n + 1 + panic("String too long to encode as label.") } -func encodeVarint(data []byte, offset int, v uint64) int { - offset -= sizeVarint(v) - base := offset - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return base -} - -// Special code for the common case that a size is less than 128 func encodeSize(data []byte, offset, v int) int { - if v < 1<<7 { + if v < 255 { offset-- data[offset] = uint8(v) return offset } - return encodeVarint(data, offset, uint64(v)) + offset -= 4 + data[offset] = 255 + data[offset+1] = byte(v) + data[offset+2] = byte((v >> 8)) + data[offset+3] = byte((v >> 16)) + return offset } func labelsSize(lbls []Label) (n int) { @@ -585,9 +563,9 @@ func labelsSize(lbls []Label) (n int) { func labelSize(m *Label) (n int) { // strings are encoded as length followed by contents. l := len(m.Name) - n += l + sizeVarint(uint64(l)) + n += l + sizeWhenEncoded(uint64(l)) l = len(m.Value) - n += l + sizeVarint(uint64(l)) + n += l + sizeWhenEncoded(uint64(l)) return n } @@ -633,7 +611,7 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } -// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// UnsafeAddBytes adds a name/value pair using []byte instead of string to reduce memory allocations. // The values must remain live until Labels() is called. func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) @@ -661,7 +639,7 @@ func (b *ScratchBuilder) Labels() Labels { return b.output } -// Write the newly-built Labels out to ls, reusing an internal buffer. +// Overwrite will write the newly-built Labels out to ls, reusing an internal buffer. // Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { size := labelsSize(b.add) @@ -674,7 +652,7 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) { ls.data = yoloString(b.overwriteBuffer) } -// Symbol-table is no-op, just for api parity with dedupelabels. +// SymbolTable is no-op, just for api parity with dedupelabels. type SymbolTable struct{} func NewSymbolTable() *SymbolTable { return nil } @@ -694,3 +672,8 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { // no-op } + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return uint64(labelSize(&Label{Name: name, Value: value})) * n +} diff --git a/model/labels/labels_stringlabels_test.go b/model/labels/labels_stringlabels_test.go new file mode 100644 index 0000000000..aaa8b52415 --- /dev/null +++ b/model/labels/labels_stringlabels_test.go @@ -0,0 +1,25 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !slicelabels && !dedupelabels + +package labels + +var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels. + 12, + 0, + 37, + 266, + 270, + 309, +} diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index d8910cdc85..3b4f802160 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -21,27 +21,38 @@ import ( "strings" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) +var ( + s254 = strings.Repeat("x", 254) // Edge cases for stringlabels encoding. + s255 = strings.Repeat("x", 255) +) + +var testCaseLabels = []Labels{ + FromStrings("t1", "t1", "t2", "t2"), + {}, + FromStrings("service.name", "t1", "whatever\\whatever", "t2"), + FromStrings("aaa", "111", "xx", s254), + FromStrings("aaa", "111", "xx", s255), + FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"), +} + func TestLabels_String(t *testing.T) { - cases := []struct { - labels Labels - expected string - }{ - { - labels: FromStrings("t1", "t1", "t2", "t2"), - expected: "{t1=\"t1\", t2=\"t2\"}", - }, - { - labels: Labels{}, - expected: "{}", - }, + expected := []string{ // Values must line up with testCaseLabels. + "{t1=\"t1\", t2=\"t2\"}", + "{}", + `{"service.name"="t1", "whatever\\whatever"="t2"}`, + `{aaa="111", xx="` + s254 + `"}`, + `{aaa="111", xx="` + s255 + `"}`, + `{" container"="prometheus", " namespace"="observability-prometheus", __name__="kube_pod_container_status_last_terminated_exitcode", cluster="prod-af-north-0", instance="kube-state-metrics-0:kube-state-metrics:ksm", job="kube-state-metrics/kube-state-metrics", pod="observability-prometheus-0", uid="d3ec90b2-4975-4607-b45d-b9ad64bb417e"}`, } - for _, c := range cases { - str := c.labels.String() - require.Equal(t, c.expected, str) + require.Len(t, expected, len(testCaseLabels)) + for i, c := range expected { + str := testCaseLabels[i].String() + require.Equal(t, c, str) } } @@ -52,6 +63,17 @@ func BenchmarkString(b *testing.B) { } } +func TestSizeOfLabels(t *testing.T) { + require.Len(t, expectedSizeOfLabels, len(testCaseLabels)) + for i, c := range expectedSizeOfLabels { // Declared in build-tag-specific files, e.g. labels_slicelabels_test.go. 
+ var total uint64 + testCaseLabels[i].Range(func(l Label) { + total += SizeOfLabels(l.Name, l.Value, 1) + }) + require.Equal(t, c, total) + } +} + func TestLabels_MatchLabels(t *testing.T) { labels := FromStrings( "__name__", "ALERTS", @@ -272,11 +294,58 @@ func TestLabels_IsValid(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - require.Equal(t, test.expected, test.input.IsValid()) + require.Equal(t, test.expected, test.input.IsValid(model.LegacyValidation)) }) } } +func TestLabels_ValidationModes(t *testing.T) { + for _, test := range []struct { + input Labels + callMode model.ValidationScheme + expected bool + }{ + { + input: FromStrings( + "__name__", "test.metric", + "hostname", "localhost", + "job", "check", + ), + callMode: model.UTF8Validation, + expected: true, + }, + { + input: FromStrings( + "__name__", "test", + "\xc5 bad utf8", "localhost", + "job", "check", + ), + callMode: model.UTF8Validation, + expected: false, + }, + { + input: FromStrings( + "__name__", "test.utf8.metric", + "hostname", "localhost", + "job", "check", + ), + callMode: model.LegacyValidation, + expected: false, + }, + { + input: FromStrings( + "__name__", "test", + "host.name", "localhost", + "job", "check", + ), + callMode: model.LegacyValidation, + expected: false, + }, + } { + require.Equal(t, test.expected, test.input.IsValid(test.callMode)) + } +} + func TestLabels_Equal(t *testing.T) { labels := FromStrings( "aaa", "111", @@ -451,7 +520,7 @@ func TestLabels_Has(t *testing.T) { } func TestLabels_Get(t *testing.T) { - require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo")) + require.Empty(t, FromStrings("aaa", "111", "bbb", "222").Get("foo")) require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa")) require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) } @@ -461,11 +530,25 @@ func TestLabels_DropMetricName(t *testing.T) { require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropMetricName())) original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222") - check := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222") + check := original.Copy() require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropMetricName())) require.True(t, Equal(original, check)) } +func TestLabels_DropReserved(t *testing.T) { + shouldDropFn := func(n string) bool { + return n == MetricName || n == "__something__" + } + require.True(t, Equal(FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").DropReserved(shouldDropFn))) + require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropReserved(shouldDropFn))) + require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "__something__", string(model.MetricTypeCounter), "aaa", "111").DropReserved(shouldDropFn))) + + original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222") + check := original.Copy() + require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropReserved(shouldDropFn))) + require.True(t, Equal(original, check)) +} + func ScratchBuilderForBenchmark() ScratchBuilder { // (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.) 
b := NewScratchBuilder(256) @@ -878,7 +961,7 @@ func TestMarshaling(t *testing.T) { expectedJSON := "{\"aaa\":\"111\",\"bbb\":\"2222\",\"ccc\":\"33333\"}" b, err := json.Marshal(lbls) require.NoError(t, err) - require.Equal(t, expectedJSON, string(b)) + require.JSONEq(t, expectedJSON, string(b)) var gotJ Labels err = json.Unmarshal(b, &gotJ) @@ -888,7 +971,7 @@ func TestMarshaling(t *testing.T) { expectedYAML := "aaa: \"111\"\nbbb: \"2222\"\nccc: \"33333\"\n" b, err = yaml.Marshal(lbls) require.NoError(t, err) - require.Equal(t, expectedYAML, string(b)) + require.YAMLEq(t, expectedYAML, string(b)) var gotY Labels err = yaml.Unmarshal(b, &gotY) @@ -904,7 +987,7 @@ func TestMarshaling(t *testing.T) { b, err = json.Marshal(f) require.NoError(t, err) expectedJSONFromStruct := "{\"a_labels\":" + expectedJSON + "}" - require.Equal(t, expectedJSONFromStruct, string(b)) + require.JSONEq(t, expectedJSONFromStruct, string(b)) var gotFJ foo err = json.Unmarshal(b, &gotFJ) @@ -914,7 +997,7 @@ func TestMarshaling(t *testing.T) { b, err = yaml.Marshal(f) require.NoError(t, err) expectedYAMLFromStruct := "a_labels:\n aaa: \"111\"\n bbb: \"2222\"\n ccc: \"33333\"\n" - require.Equal(t, expectedYAMLFromStruct, string(b)) + require.YAMLEq(t, expectedYAMLFromStruct, string(b)) var gotFY foo err = yaml.Unmarshal(b, &gotFY) diff --git a/model/labels/regexp.go b/model/labels/regexp.go index d2151d83dd..1636aacc21 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -63,13 +63,13 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { // available, even if the string matcher is faster. m.matchString = m.stringMatcher.Matches } else { - parsed, err := syntax.Parse(v, syntax.Perl) + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) if err != nil { return nil, err } // Simplify the syntax tree to run faster. parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") if err != nil { return nil, err } @@ -95,12 +95,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool { return func(s string) bool { if len(m.setMatches) != 0 { - for _, match := range m.setMatches { - if match == s { - return true - } - } - return false + return slices.Contains(m.setMatches, s) } if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { return false @@ -771,16 +766,11 @@ func (m *equalMultiStringSliceMatcher) setMatches() []string { func (m *equalMultiStringSliceMatcher) Matches(s string) bool { if m.caseSensitive { - for _, v := range m.values { - if s == v { - return true - } - } - } else { - for _, v := range m.values { - if strings.EqualFold(s, v) { - return true - } + return slices.Contains(m.values, s) + } + for _, v := range m.values { + if strings.EqualFold(s, v) { + return true } } return false @@ -802,7 +792,7 @@ type equalMultiStringMapMatcher struct { func (m *equalMultiStringMapMatcher) add(s string) { if !m.caseSensitive { - s = toNormalisedLower(s) + s = toNormalisedLower(s, nil) // Don't pass a stack buffer here - it will always escape to heap. 
}
 m.values[s] = struct{}{}
@@ -840,15 +830,24 @@ func (m *equalMultiStringMapMatcher) setMatches() []string {
 }
 
 func (m *equalMultiStringMapMatcher) Matches(s string) bool {
- if !m.caseSensitive {
- s = toNormalisedLower(s)
+ if len(m.values) > 0 {
+ sNorm := s
+ var a [32]byte
+ if !m.caseSensitive {
+ sNorm = toNormalisedLower(s, a[:])
+ }
+ if _, ok := m.values[sNorm]; ok {
+ return true
+ }
 }
- if _, ok := m.values[s]; ok {
- return true
- }
 if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
- for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
+ prefix := s[:m.minPrefixLen]
+ var a [32]byte
+ if !m.caseSensitive {
+ prefix = toNormalisedLower(s[:m.minPrefixLen], a[:])
+ }
+ for _, matcher := range m.prefixes[prefix] {
 if matcher.Matches(s) {
 return true
 }
@@ -859,22 +858,37 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool {
 
 // toNormalisedLower normalises the input string using Unicode Normalization Form KD (NFKD) and then converts
 // it to lower case.
-func toNormalisedLower(s string) string {
- var buf []byte
+func toNormalisedLower(s string, a []byte) string {
 for i := 0; i < len(s); i++ {
 c := s[i]
 if c >= utf8.RuneSelf {
 return strings.Map(unicode.ToLower, norm.NFKD.String(s))
 }
 if 'A' <= c && c <= 'Z' {
- if buf == nil {
- buf = []byte(s)
- }
- buf[i] = c + 'a' - 'A'
+ return toNormalisedLowerSlow(s, i, a)
 }
 }
- if buf == nil {
- return s
+ return s
+}
+
+// toNormalisedLowerSlow is split from toNormalisedLower because having a call
+// to `copy` slows it down even when it is not called.
+func toNormalisedLowerSlow(s string, i int, a []byte) string {
+ var buf []byte
+ if cap(a) > len(s) {
+ buf = a[:len(s)]
+ copy(buf, s)
+ } else {
+ buf = []byte(s)
+ }
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c >= utf8.RuneSelf {
+ return strings.Map(unicode.ToLower, norm.NFKD.String(s))
+ }
+ if 'A' <= c && c <= 'Z' {
+ buf[i] = c + 'a' - 'A'
+ }
 }
 return yoloString(buf)
 }
@@ -967,7 +981,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str
 return true
 }
 
- analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+ analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool {
 // Ensure we don't have mixed case sensitivity.
if caseSensitiveSet && caseSensitive != prefixCaseSensitive { return false @@ -1002,7 +1016,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { multiMatcher.add(matcher.s) return true - }, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + }, func(prefix string, _ bool, matcher StringMatcher) bool { multiMatcher.addPrefix(prefix, caseSensitive, matcher) return true }) diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index 24875e64ef..bb6fce3104 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -121,7 +121,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) { t.Parallel() m, err := NewFastRegexMatcher(r) require.NoError(t, err) - re := regexp.MustCompile("^(?:" + r + ")$") + re := regexp.MustCompile("^(?s:" + r + ")$") require.Equal(t, re.MatchString(v), m.MatchString(v)) }) } @@ -167,7 +167,7 @@ func TestOptimizeConcatRegex(t *testing.T) { } for _, c := range cases { - parsed, err := syntax.Parse(c.regex, syntax.Perl) + parsed, err := syntax.Parse(c.regex, syntax.Perl|syntax.DotNL) require.NoError(t, err) prefix, suffix, contains := optimizeConcatRegex(parsed) @@ -248,7 +248,7 @@ func TestFindSetMatches(t *testing.T) { c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches, actualCaseSensitive := findSetMatches(parsed) require.Equal(t, c.expMatches, matches) @@ -333,7 +333,8 @@ func BenchmarkToNormalizedLower(b *testing.B) { } b.ResetTimer() for n := 0; n < b.N; n++ { - toNormalisedLower(inputs[n%len(inputs)]) + var a [256]byte + toNormalisedLower(inputs[n%len(inputs)], a[:]) } }) } @@ -348,15 +349,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { pattern string exp StringMatcher }{ - {".*", anyStringWithoutNewlineMatcher{}}, - {".*?", anyStringWithoutNewlineMatcher{}}, + {".*", trueMatcher{}}, + {".*?", trueMatcher{}}, {"(?s:.*)", trueMatcher{}}, - {"(.*)", anyStringWithoutNewlineMatcher{}}, - {"^.*$", anyStringWithoutNewlineMatcher{}}, - {".+", &anyNonEmptyStringMatcher{matchNL: false}}, + {"(.*)", trueMatcher{}}, + {"^.*$", trueMatcher{}}, + {".+", &anyNonEmptyStringMatcher{matchNL: true}}, {"(?s:.+)", &anyNonEmptyStringMatcher{matchNL: true}}, - {"^.+$", &anyNonEmptyStringMatcher{matchNL: false}}, - {"(.+)", &anyNonEmptyStringMatcher{matchNL: false}}, + {"^.+$", &anyNonEmptyStringMatcher{matchNL: true}}, + {"(.+)", &anyNonEmptyStringMatcher{matchNL: true}}, {"", emptyStringMatcher{}}, {"^$", emptyStringMatcher{}}, {"^foo$", &equalStringMatcher{s: "foo", caseSensitive: true}}, @@ -366,23 +367,23 @@ func TestStringMatcherFromRegexp(t *testing.T) { {`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})}, {"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})}, {"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", 
caseSensitive: false}})}, - {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: anyStringWithoutNewlineMatcher{}}}, - {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}}, - {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}}, + {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}}, + {"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: trueMatcher{}}}, + {"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}}, {"10\\.0\\.(1|2)\\.+", nil}, - {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}}, - {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}}, - {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, - {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: 
false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, - {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})}, - {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: nil}}, + {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{matchNL: true}, suffix: "foo", suffixCaseSensitive: true}}, + {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: trueMatcher{}}}, + {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}}, + {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, + {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: true}}})}, + {"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: nil}}, // we don't support case insensitive matching for contains. // This is because there's no strings.IndexOfFold function. // We can revisit later if this is really popular by using strings.ToUpper. @@ -393,15 +394,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {".*foo.*bar.*", nil}, {`\d*`, nil}, {".", nil}, - {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}}, + {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}}, // This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat. // It would make the code too complex to handle it. {"(.+)/(foo.*|bar$)", nil}, // Case sensitive alternate with same literal prefix and .* suffix. - {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: trueMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: trueMatcher{}}}}}, // Case insensitive alternate with same literal prefix and .* suffix. 
- {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, - {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, + {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}}, // Concatenated variable length selectors are not supported. {"foo.*.*", nil}, {"foo.+.+", nil}, @@ -410,15 +411,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"aaa.?.?", nil}, {"aaa.?.*", nil}, // Regexps with ".?". - {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, + {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}}, {"f.?o", nil}, } { c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matches := stringMatcherFromRegexp(parsed) require.Equal(t, c.exp, matches) @@ -437,16 +438,16 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { { pattern: "(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. 
{ pattern: "(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", expectedLiteralPrefixMatchers: 3, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX"}, - expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp", "xyz-016a-ixb-dp\n"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"}, + expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp"}, }, // Nested literal prefixes, case sensitive. @@ -474,13 +475,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher. numPrefixMatchers := 0 @@ -523,16 +524,16 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { { pattern: "(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Case insensitive. { pattern: "(?i)(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)", expectedLiteralSuffixMatchers: 2, - expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op"}, - expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"}, + expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"}, + expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp"}, }, // Nested literal suffixes, case sensitive. @@ -552,13 +553,13 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) { }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains literalSuffixStringMatcher. 
numSuffixMatchers := 0 @@ -598,26 +599,26 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "test.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "test!"}, - expectedNotMatches: []string{"test\n", "tes", "test!!"}, + expectedMatches: []string{"test\n", "test", "test!"}, + expectedNotMatches: []string{"tes", "test!!"}, }, { pattern: ".?test", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"test", "!test"}, - expectedNotMatches: []string{"\ntest", "tes", "test!"}, + expectedMatches: []string{"\ntest", "test", "!test"}, + expectedNotMatches: []string{"tes", "test!"}, }, { pattern: "(aaa.?|bbb.?)", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX"}, - expectedNotMatches: []string{"aa", "aaaXX", "aaa\n", "bb", "bbbXX", "bbb\n"}, + expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX", "aaa\n", "bbb\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "bb", "bbbXX"}, }, { pattern: ".*aaa.?", expectedZeroOrOneMatchers: 1, - expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX"}, - expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX", "XXXaaa\n"}, + expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX", "XXXaaa\n"}, + expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX"}, }, // Match newline. @@ -632,18 +633,18 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) { { pattern: "(aaa.?|((?s).?bbb.+))", expectedZeroOrOneMatchers: 2, - expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX"}, - expectedNotMatches: []string{"aa", "aaa\n", "Xbbb", "\nbbb"}, + expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX", "aaa\n"}, + expectedNotMatches: []string{"aa", "Xbbb", "\nbbb"}, }, } { t.Run(c.pattern, func(t *testing.T) { - parsed, err := syntax.Parse(c.pattern, syntax.Perl) + parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) require.NoError(t, err) matcher := stringMatcherFromRegexp(parsed) require.NotNil(t, matcher) - re := regexp.MustCompile("^" + c.pattern + "$") + re := regexp.MustCompile("^(?s:" + c.pattern + ")$") // Pre-condition check: ensure it contains zeroOrOneCharacterStringMatcher. numZeroOrOneMatchers := 0 @@ -986,7 +987,7 @@ func TestFindEqualOrPrefixStringMatchers(t *testing.T) { ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { matches = append(matches, match{matcher.s, matcher.caseSensitive}) return true - }, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool { + }, func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool { matches = append(matches, match{prefix, prefixCaseSensitive}) return true }) @@ -1112,7 +1113,7 @@ func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) { } b.Logf("regexp: %s", re) - parsed, err := syntax.Parse(re, syntax.Perl) + parsed, err := syntax.Parse(re, syntax.Perl|syntax.DotNL) require.NoError(b, err) unoptimized := stringMatcherFromRegexpInternal(parsed) @@ -1390,6 +1391,6 @@ func TestToNormalisedLower(t *testing.T) { "ſſAſſa": "ssassa", } for input, expectedOutput := range testCases { - require.Equal(t, expectedOutput, toNormalisedLower(input)) + require.Equal(t, expectedOutput, toNormalisedLower(input, nil)) } } diff --git a/model/labels/sharding.go b/model/labels/sharding.go index 5e3e89fbbb..ed05da675f 100644 --- a/model/labels/sharding.go +++ b/model/labels/sharding.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !stringlabels && !dedupelabels
+//go:build slicelabels
 
 package labels
 
@@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 {
 }
 
 b = append(b, v.Name...)
- b = append(b, seps[0])
+ b = append(b, sep)
 b = append(b, v.Value...)
- b = append(b, seps[0])
+ b = append(b, sep)
 }
 return xxhash.Sum64(b)
 }
diff --git a/model/labels/sharding_dedupelabels.go b/model/labels/sharding_dedupelabels.go
index 5912724f9b..5bf41b05d6 100644
--- a/model/labels/sharding_dedupelabels.go
+++ b/model/labels/sharding_dedupelabels.go
@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
 }
 
 b = append(b, name...)
- b = append(b, seps[0])
+ b = append(b, sep)
 b = append(b, value...)
- b = append(b, seps[0])
+ b = append(b, sep)
 pos = newPos
 }
 return xxhash.Sum64(b)
 }
diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go
index 3ad2027d8c..4dcbaa21d1 100644
--- a/model/labels/sharding_stringlabels.go
+++ b/model/labels/sharding_stringlabels.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build stringlabels
+//go:build !slicelabels && !dedupelabels
 
 package labels
 
@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
 }
 b = append(b, v.Name...)
- b = append(b, seps[0])
+ b = append(b, sep)
 b = append(b, v.Value...)
- b = append(b, seps[0])
+ b = append(b, sep)
 }
 if h != nil {
 return h.Sum64()
diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go
index 4f33edda43..70daef426f 100644
--- a/model/relabel/relabel.go
+++ b/model/relabel/relabel.go
@@ -16,6 +16,8 @@ package relabel
 import (
 "crypto/md5"
 "encoding/binary"
+ "encoding/json"
+ "errors"
 "fmt"
 "strconv"
 "strings"
@@ -27,7 +29,8 @@ import (
 )
 
 var (
- relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`)
+ // relabelTargetLegacy allows targeting labels with the legacy Prometheus character set, plus ${} variables for dynamic values taken from the source metrics.
+ relabelTargetLegacy = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`)
 
 DefaultRelabelConfig = Config{
 Action: Replace,
@@ -83,20 +86,20 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
 type Config struct {
 // A list of labels from which values are taken and concatenated
 // with the configured separator in order.
- SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"`
+ SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty" json:"sourceLabels,omitempty"`
 // Separator is the string between concatenated values from the source labels.
- Separator string `yaml:"separator,omitempty"`
+ Separator string `yaml:"separator,omitempty" json:"separator,omitempty"`
 // Regex against which the concatenation is matched.
- Regex Regexp `yaml:"regex,omitempty"`
+ Regex Regexp `yaml:"regex,omitempty" json:"regex,omitempty"`
 // Modulus to take of the hash of concatenated values from the source labels.
- Modulus uint64 `yaml:"modulus,omitempty"`
+ Modulus uint64 `yaml:"modulus,omitempty" json:"modulus,omitempty"`
 // TargetLabel is the label to which the resulting string is written in a replacement.
 // Regexp interpolation is allowed for the replace action.
- TargetLabel string `yaml:"target_label,omitempty"`
+ TargetLabel string `yaml:"target_label,omitempty" json:"targetLabel,omitempty"`
 // Replacement is the regex replacement pattern to be used.
- Replacement string `yaml:"replacement,omitempty"`
+ Replacement string `yaml:"replacement,omitempty" json:"replacement,omitempty"`
 // Action is the action to be performed for the relabeling.
- Action Action `yaml:"action,omitempty"`
+ Action Action `yaml:"action,omitempty" json:"action,omitempty"`
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -114,18 +117,25 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 
 func (c *Config) Validate() error {
 if c.Action == "" {
- return fmt.Errorf("relabel action cannot be empty")
+ return errors.New("relabel action cannot be empty")
 }
 if c.Modulus == 0 && c.Action == HashMod {
- return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
+ return errors.New("relabel configuration for hashmod requires non-zero modulus")
 }
 if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
 return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
 }
- if c.Action == Replace && !strings.Contains(c.TargetLabel, "$") && !model.LabelName(c.TargetLabel).IsValid() {
+ if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !model.LabelName(c.TargetLabel).IsValid() {
 return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 }
- if c.Action == Replace && strings.Contains(c.TargetLabel, "$") && !relabelTarget.MatchString(c.TargetLabel) {
+
+ isValidLabelNameWithRegexVarFn := func(value string) bool {
+ // UTF-8 allows ${} characters, so standard validation allows $variables by default.
+ // TODO(bwplotka): Relabelling users cannot put $ and ${<...>} characters in metric names or values.
+ // Design an escaping mechanism to allow that, once a valid use case appears.
+ return model.LabelName(value).IsValid()
+ }
+ if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) {
 return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 }
 if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() {
@@ -134,7 +144,7 @@ func (c *Config) Validate() error {
 if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement {
 return fmt.Errorf("'replacement' can not be set for %s action", c.Action)
 }
- if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
+ if c.Action == LabelMap && !isValidLabelNameWithRegexVarFn(c.Replacement) {
 return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
 }
 if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() {
@@ -171,7 +181,7 @@ type Regexp struct {
 // NewRegexp creates a new anchored Regexp and returns an error if the
 // passed-in regular expression does not compile.
 func NewRegexp(s string) (Regexp, error) {
- regex, err := regexp.Compile("^(?:" + s + ")$")
+ regex, err := regexp.Compile("^(?s:" + s + ")$")
 return Regexp{Regexp: regex}, err
 }
@@ -206,6 +216,25 @@ func (re Regexp) MarshalYAML() (interface{}, error) {
 return nil, nil
 }
 
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (re *Regexp) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + r, err := NewRegexp(s) + if err != nil { + return err + } + *re = r + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (re Regexp) MarshalJSON() ([]byte, error) { + return json.Marshal(re.String()) +} + // IsZero implements the yaml.IsZeroer interface. func (re Regexp) IsZero() bool { return re.Regexp == DefaultRelabelConfig.Regex.Regexp @@ -213,9 +242,13 @@ func (re Regexp) IsZero() bool { // String returns the original string used to compile the regular expression. func (re Regexp) String() string { + if re.Regexp == nil { + return "" + } + str := re.Regexp.String() - // Trim the anchor `^(?:` prefix and `)$` suffix. - return str[4 : len(str)-2] + // Trim the anchor `^(?s:` prefix and `)$` suffix. + return str[5 : len(str)-2] } // Process returns a relabeled version of the given label set. The relabel configurations @@ -273,6 +306,13 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. + if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + lb.Set(cfg.TargetLabel, cfg.Replacement) + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. if indexes == nil { @@ -322,3 +362,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 0f11f7068d..845e304f3b 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -14,6 +14,7 @@ package relabel import ( + "encoding/json" "strconv" "testing" @@ -569,6 +570,166 @@ func TestRelabel(t *testing.T) { }, drop: true, }, + { + input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "d", + Separator: ";", + Replacement: "match${1}", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "d": "match", + }), + }, + { // Replace on source label with UTF-8 characters. + input: labels.FromMap(map[string]string{ + "utf-8.label": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"utf-8.label"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "d", + Separator: ";", + Replacement: `match${1}`, + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "utf-8.label": "line1\nline2", + "b": "bar", + "c": "baz", + "d": "match", + }), + }, + { // Replace targetLabel with UTF-8 characters. + input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "utf-8.label", + Separator: ";", + Replacement: `match${1}`, + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "utf-8.label": "match", + }), + }, + { // Replace targetLabel with UTF-8 characters and $variable. 
+ input: labels.FromMap(map[string]string{
+ "a": "line1\nline2",
+ "b": "bar",
+ "c": "baz",
+ }),
+ relabel: []*Config{
+ {
+ SourceLabels: model.LabelNames{"a"},
+ Regex: MustNewRegexp("line1.*line2"),
+ TargetLabel: "utf-8.label${1}",
+ Separator: ";",
+ Replacement: "match${1}",
+ Action: Replace,
+ },
+ },
+ output: labels.FromMap(map[string]string{
+ "a": "line1\nline2",
+ "b": "bar",
+ "c": "baz",
+ "utf-8.label": "match",
+ }),
+ },
+ { // Replace targetLabel with UTF-8 characters and various $var styles.
+ input: labels.FromMap(map[string]string{
+ "a": "line1\nline2",
+ "b": "bar",
+ "c": "baz",
+ }),
+ relabel: []*Config{
+ {
+ SourceLabels: model.LabelNames{"a"},
+ Regex: MustNewRegexp("(line1).*(?P<second>line2)"),
+ TargetLabel: "label1_${1}",
+ Separator: ";",
+ Replacement: "val_${second}",
+ Action: Replace,
+ },
+ {
+ SourceLabels: model.LabelNames{"a"},
+ Regex: MustNewRegexp("(line1).*(?P<second>line2)"),
+ TargetLabel: "label2_$1",
+ Separator: ";",
+ Replacement: "val_$second",
+ Action: Replace,
+ },
+ },
+ output: labels.FromMap(map[string]string{
+ "a": "line1\nline2",
+ "b": "bar",
+ "c": "baz",
+ "label1_line1": "val_line2",
+ "label2_line1": "val_line2",
+ }),
+ },
+ {
+ input: labels.FromMap(map[string]string{
+ "__name__": "http_requests_total",
+ }),
+ relabel: []*Config{
+ {
+ SourceLabels: model.LabelNames{"__name__"},
+ Regex: MustNewRegexp(".*_total$"),
+ TargetLabel: "__type__",
+ Replacement: "counter",
+ Action: Replace,
+ },
+ },
+ output: labels.FromMap(map[string]string{
+ "__name__": "http_requests_total",
+ "__type__": "counter",
+ }),
+ },
+ {
+ input: labels.FromMap(map[string]string{
+ "__name__": "disk_usage_bytes",
+ }),
+ relabel: []*Config{
+ {
+ SourceLabels: model.LabelNames{"__name__"},
+ Regex: MustNewRegexp(".*_bytes$"),
+ TargetLabel: "__unit__",
+ Replacement: "bytes",
+ Action: Replace,
+ },
+ },
+ output: labels.FromMap(map[string]string{
+ "__name__": "disk_usage_bytes",
+ "__unit__": "bytes",
+ }),
+ },
 }
 
 for _, test := range tests {
@@ -622,9 +783,8 @@ func TestRelabelValidate(t *testing.T) {
 config: Config{
 Action: Lowercase,
 Replacement: DefaultRelabelConfig.Replacement,
- TargetLabel: "${3}",
+ TargetLabel: "${3}", // With UTF-8 naming, this is now a legal relabel rule.
 },
- expected: `"${3}" is invalid 'target_label'`,
 },
 {
 config: Config{
@@ -641,9 +801,8 @@ func TestRelabelValidate(t *testing.T) {
 Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
 Action: Replace,
 Replacement: "${1}",
- TargetLabel: "0${3}",
+ TargetLabel: "0${3}", // With UTF-8 naming this targets a valid label.
 },
- expected: `"0${3}" is invalid 'target_label'`,
 },
 {
 config: Config{
@@ -651,9 +810,8 @@ func TestRelabelValidate(t *testing.T) {
 Regex: MustNewRegexp("some-([^-]+)-([^,]+)"),
 Action: Replace,
 Replacement: "${1}",
- TargetLabel: "-${3}",
+ TargetLabel: "-${3}", // With UTF-8 naming this targets a valid label.
}, - expected: `"-${3}" is invalid 'target_label' for replace action`, }, } for i, test := range tests { @@ -668,8 +826,8 @@ func TestRelabelValidate(t *testing.T) { } } -func TestTargetLabelValidity(t *testing.T) { - tests := []struct { +func TestTargetLabelLegacyValidity(t *testing.T) { + for _, test := range []struct { str string valid bool }{ @@ -686,11 +844,11 @@ func TestTargetLabelValidity(t *testing.T) { {"$1", true}, {"asd$2asd", true}, {"-foo${1}bar-", false}, + {"bar.foo${1}bar", false}, {"_${1}_", true}, {"foo${bar}foo", true}, - } - for _, test := range tests { - require.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)), + } { + require.Equal(t, test.valid, relabelTargetLegacy.Match([]byte(test.str)), "Expected %q to be %v", test.str, test.valid) } } @@ -838,6 +996,34 @@ func BenchmarkRelabel(b *testing.B) { "__scrape_timeout__", "10s", "job", "kubernetes-pods"), }, + { + name: "static label pair", + config: ` + - replacement: wwwwww + target_label: wwwwww + - replacement: yyyyyyyyyyyy + target_label: xxxxxxxxx + - replacement: xxxxxxxxx + target_label: yyyyyyyyyyyy + - source_labels: ["something"] + target_label: with_source_labels + replacement: value + - replacement: dropped + target_label: ${0} + - replacement: ${0} + target_label: dropped`, + lbls: labels.FromStrings( + "abcdefg01", "hijklmn1", + "abcdefg02", "hijklmn2", + "abcdefg03", "hijklmn3", + "abcdefg04", "hijklmn4", + "abcdefg05", "hijklmn5", + "abcdefg06", "hijklmn6", + "abcdefg07", "hijklmn7", + "abcdefg08", "hijklmn8", + "job", "foo", + ), + }, } for i := range tests { err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs) @@ -900,3 +1086,48 @@ action: replace }) } } + +func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) { + var zero Regexp + + marshalled, err := yaml.Marshal(&zero) + require.NoError(t, err) + require.Equal(t, "null\n", string(marshalled)) + + var unmarshalled Regexp + err = yaml.Unmarshal(marshalled, &unmarshalled) + require.NoError(t, err) + require.Nil(t, unmarshalled.Regexp) +} + +func TestRegexp_JSONUnmarshalThenMarshal(t *testing.T) { + tests := []struct { + name string + input string + }{ + { + name: "Empty regex", + input: `{"regex":""}`, + }, + { + name: "string literal", + input: `{"regex":"foo"}`, + }, + { + name: "regex", + input: `{"regex":".*foo.*"}`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var unmarshalled Config + err := json.Unmarshal([]byte(test.input), &unmarshalled) + require.NoError(t, err) + + marshalled, err := json.Marshal(&unmarshalled) + require.NoError(t, err) + + require.Equal(t, test.input, string(marshalled)) + }) + } +} diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index bfb85ce740..9b1c897a98 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -92,7 +92,7 @@ type RuleGroups struct { } type ruleGroups struct { - Groups []yaml.Node `yaml:"groups"` + Groups []RuleGroupNode `yaml:"groups"` } // Validate validates all rules in the rule groups. 
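With the `json` struct tags and the `Regexp` JSON methods added above, a relabel rule can now round-trip through JSON as well as YAML. A small sketch of that round trip, assuming the patched `model/relabel` package; the concrete rule below is illustrative, not taken from the tests:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	in := []byte(`{"sourceLabels":["__name__"],"regex":".*_total$","targetLabel":"__type__","replacement":"counter","action":"replace"}`)

	var cfg relabel.Config
	if err := json.Unmarshal(in, &cfg); err != nil {
		panic(err)
	}
	// UnmarshalJSON recompiles the pattern with the new "^(?s:...)$" anchors,
	// and String() trims them back off, so the original pattern is preserved.
	out, err := json.Marshal(&cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out) == string(in)) // true
}
```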
@@ -111,12 +111,26 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
 )
 }
 
+ for k, v := range g.Labels {
+ if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
+ errs = append(
+ errs, fmt.Errorf("invalid label name: %s", k),
+ )
+ }
+
+ if !model.LabelValue(v).IsValid() {
+ errs = append(
+ errs, fmt.Errorf("invalid label value: %s", v),
+ )
+ }
+ }
+
 set[g.Name] = struct{}{}
 
 for i, r := range g.Rules {
- for _, node := range g.Rules[i].Validate() {
- var ruleName yaml.Node
- if r.Alert.Value != "" {
+ for _, node := range r.Validate(node.Groups[j].Rules[i]) {
+ var ruleName string
+ if r.Alert != "" {
 ruleName = r.Alert
 } else {
 ruleName = r.Record
@@ -124,7 +138,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
 errs = append(errs, &Error{
 Group: g.Name,
 Rule: i + 1,
- RuleName: ruleName.Value,
+ RuleName: ruleName,
 Err: node,
 })
 }
@@ -136,11 +150,23 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
 
 // RuleGroup is a list of sequentially evaluated recording and alerting rules.
 type RuleGroup struct {
- Name string `yaml:"name"`
- Interval model.Duration `yaml:"interval,omitempty"`
- QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
- Limit int `yaml:"limit,omitempty"`
- Rules []RuleNode `yaml:"rules"`
+ Name string `yaml:"name"`
+ Interval model.Duration `yaml:"interval,omitempty"`
+ QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
+ Limit int `yaml:"limit,omitempty"`
+ Rules []Rule `yaml:"rules"`
+ Labels map[string]string `yaml:"labels,omitempty"`
+}
+
+// RuleGroupNode adds a yaml.v3 layer to support line and column output for invalid rule groups.
+type RuleGroupNode struct {
+ yaml.Node
+ Name string `yaml:"name"`
+ Interval model.Duration `yaml:"interval,omitempty"`
+ QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
+ Limit int `yaml:"limit,omitempty"`
+ Rules []RuleNode `yaml:"rules"`
+ Labels map[string]string `yaml:"labels,omitempty"`
 }
 
 // Rule describes an alerting or recording rule.
@@ -166,56 +192,64 @@ type RuleNode struct {
 }
 
 // Validate the rule and return a list of encountered errors.
-func (r *RuleNode) Validate() (nodes []WrappedError) {
- if r.Record.Value != "" && r.Alert.Value != "" {
+func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) {
+ if r.Record != "" && r.Alert != "" {
 nodes = append(nodes, WrappedError{
- err: fmt.Errorf("only one of 'record' and 'alert' must be set"),
- node: &r.Record,
- nodeAlt: &r.Alert,
+ err: errors.New("only one of 'record' and 'alert' must be set"),
+ node: &node.Record,
+ nodeAlt: &node.Alert,
 })
 }
- if r.Record.Value == "" && r.Alert.Value == "" {
+ if r.Record == "" && r.Alert == "" {
 nodes = append(nodes, WrappedError{
- err: fmt.Errorf("one of 'record' or 'alert' must be set"),
- node: &r.Record,
- nodeAlt: &r.Alert,
+ err: errors.New("one of 'record' or 'alert' must be set"),
+ node: &node.Record,
+ nodeAlt: &node.Alert,
 })
 }
 
- if r.Expr.Value == "" {
+ if r.Expr == "" {
 nodes = append(nodes, WrappedError{
- err: fmt.Errorf("field 'expr' must be set in rule"),
- node: &r.Expr,
+ err: errors.New("field 'expr' must be set in rule"),
+ node: &node.Expr,
 })
- } else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
+ } else if _, err := parser.ParseExpr(r.Expr); err != nil {
 nodes = append(nodes, WrappedError{
 err: fmt.Errorf("could not parse expression: %w", err),
- node: &r.Expr,
+ node: &node.Expr,
 })
 }
- if r.Record.Value != "" {
+ if r.Record != "" {
 if len(r.Annotations) > 0 {
 nodes = append(nodes, WrappedError{
- err: fmt.Errorf("invalid field 'annotations' in recording rule"),
- node: &r.Record,
+ err: errors.New("invalid field 'annotations' in recording rule"),
+ node: &node.Record,
 })
 }
 if r.For != 0 {
 nodes = append(nodes, WrappedError{
- err: fmt.Errorf("invalid field 'for' in recording rule"),
- node: &r.Record,
+ err: errors.New("invalid field 'for' in recording rule"),
+ node: &node.Record,
 })
 }
 if r.KeepFiringFor != 0 {
 nodes = append(nodes, WrappedError{
- err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
- node: &r.Record,
+ err: errors.New("invalid field 'keep_firing_for' in recording rule"),
+ node: &node.Record,
 })
 }
- if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
+ if !model.IsValidMetricName(model.LabelValue(r.Record)) {
 nodes = append(nodes, WrappedError{
- err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value),
- node: &r.Record,
+ err: fmt.Errorf("invalid recording rule name: %s", r.Record),
+ node: &node.Record,
+ })
+ }
+ // While the record name may be valid UTF-8, it's a common mistake to put a PromQL expression in it.
+ // Disallow "{}" chars.
+ if strings.Contains(r.Record, "{") || strings.Contains(r.Record, "}") {
+ nodes = append(nodes, WrappedError{
+ err: fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record),
+ node: &node.Record,
 })
 }
 }
@@ -251,8 +285,8 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
 
 // testTemplateParsing checks if the templates used in labels and annotations
 // of the alerting rules are parsed correctly.
-func testTemplateParsing(rl *RuleNode) (errs []error) {
- if rl.Alert.Value == "" {
+func testTemplateParsing(rl *Rule) (errs []error) {
+ if rl.Alert == "" {
 // Not an alerting rule.
return errs } @@ -269,7 +303,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { tmpl := template.NewTemplateExpander( context.TODO(), strings.Join(append(defs, text), ""), - "__alert_"+rl.Alert.Value, + "__alert_"+rl.Alert, tmplData, model.Time(timestamp.FromTime(time.Now())), nil, @@ -299,7 +333,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { } // Parse parses and validates a set of rules. -func Parse(content []byte) (*RuleGroups, []error) { +func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) { var ( groups RuleGroups node ruleGroups ) decoder := yaml.NewDecoder(bytes.NewReader(content)) - decoder.KnownFields(true) + if !ignoreUnknownFields { + decoder.KnownFields(true) + } err := decoder.Decode(&groups) // Ignore io.EOF which happens with empty input. if err != nil && !errors.Is(err, io.EOF) { @@ -326,12 +362,12 @@ func Parse(content []byte) (*RuleGroups, []error) { } // ParseFile reads and parses rules from a file. -func ParseFile(file string) (*RuleGroups, []error) { +func ParseFile(file string, ignoreUnknownFields bool) (*RuleGroups, []error) { b, err := os.ReadFile(file) if err != nil { return nil, []error{fmt.Errorf("%s: %w", file, err)} } - rgs, errs := Parse(b) + rgs, errs := Parse(b, ignoreUnknownFields) for i := range errs { errs[i] = fmt.Errorf("%s: %w", file, errs[i]) } diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index ef5008f4bf..e8ac3077bd 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -24,12 +24,44 @@ import ( ) func TestParseFileSuccess(t *testing.T) { - _, errs := ParseFile("testdata/test.yaml") + _, errs := ParseFile("testdata/test.yaml", false) + require.Empty(t, errs, "unexpected errors parsing file") + + _, errs = ParseFile("testdata/utf-8_lname.good.yaml", false) + require.Empty(t, errs, "unexpected errors parsing file") + _, errs = ParseFile("testdata/utf-8_annotation.good.yaml", false) require.Empty(t, errs, "unexpected errors parsing file") } +func TestParseFileSuccessWithAliases(t *testing.T) { + exprString := `sum without(instance) (rate(errors_total[5m])) +/ +sum without(instance) (rate(requests_total[5m])) +` + rgs, errs := ParseFile("testdata/test_aliases.yaml", false) + require.Empty(t, errs, "unexpected errors parsing file") + for _, rg := range rgs.Groups { + require.Equal(t, "HighAlert", rg.Rules[0].Alert) + require.Equal(t, "critical", rg.Rules[0].Labels["severity"]) + require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[0].Annotations["description"]) + + require.Equal(t, "new_metric", rg.Rules[1].Record) + + require.Equal(t, "HighAlert", rg.Rules[2].Alert) + require.Equal(t, "critical", rg.Rules[2].Labels["severity"]) + require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[2].Annotations["description"]) + + require.Equal(t, "HighAlert2", rg.Rules[3].Alert) + require.Equal(t, "critical", rg.Rules[3].Labels["severity"]) + + for _, rule := range rg.Rules { + require.Equal(t, exprString, rule.Expr) + } + } +} + func TestParseFileFailure(t *testing.T) { - table := []struct { + for _, c := range []struct { filename string errMsg string }{ @@ -53,17 +85,9 @@ func TestParseFileFailure(t *testing.T) { filename: "noexpr.bad.yaml", errMsg: "field 'expr' must be set in rule", }, - { - filename: "bad_lname.bad.yaml", - errMsg: "invalid label name", - }, - { - filename: "bad_annotation.bad.yaml", - errMsg: "invalid annotation name", - }, {
filename: "invalid_record_name.bad.yaml", - errMsg: "invalid recording rule name", + errMsg: "braces present in the recording rule name; should it be in expr?: strawberry{flavor=\"sweet\"}", }, { filename: "bad_field.bad.yaml", @@ -81,13 +105,12 @@ func TestParseFileFailure(t *testing.T) { filename: "record_and_keep_firing_for.bad.yaml", errMsg: "invalid field 'keep_firing_for' in recording rule", }, - } - - for _, c := range table { - _, errs := ParseFile(filepath.Join("testdata", c.filename)) - require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename) - require.Error(t, errs[0]) - require.Containsf(t, errs[0].Error(), c.errMsg, "Expected error for %s.", c.filename) + } { + t.Run(c.filename, func(t *testing.T) { + _, errs := ParseFile(filepath.Join("testdata", c.filename), false) + require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename) + require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename) + }) } } @@ -108,6 +131,23 @@ groups: severity: "page" annotations: summary: "Instance {{ $labels.instance }} down" +`, + shouldPass: true, + }, + { + ruleString: ` +groups: +- name: example + labels: + team: myteam + rules: + - alert: InstanceDown + expr: up == 0 + for: 5m + labels: + severity: "page" + annotations: + summary: "Instance {{ $labels.instance }} down" `, shouldPass: true, }, @@ -163,7 +203,7 @@ groups: } for _, tst := range tests { - rgs, errs := Parse([]byte(tst.ruleString)) + rgs, errs := Parse([]byte(tst.ruleString), false) require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString) passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0) require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString) @@ -190,7 +230,7 @@ groups: annotations: summary: "Instance {{ $labels.instance }} up" ` - _, errs := Parse([]byte(group)) + _, errs := Parse([]byte(group), false) require.Len(t, errs, 2, "Expected two errors") var err00 *Error require.ErrorAs(t, errs[0], &err00) @@ -259,8 +299,7 @@ func TestError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := tt.error.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.error, tt.want) }) } } @@ -308,8 +347,7 @@ func TestWrappedError(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := tt.wrappedError.Error() - require.Equal(t, tt.want, got) + require.EqualError(t, tt.wrappedError, tt.want) }) } } diff --git a/model/rulefmt/testdata/test_aliases.yaml b/model/rulefmt/testdata/test_aliases.yaml new file mode 100644 index 0000000000..326259b0b6 --- /dev/null +++ b/model/rulefmt/testdata/test_aliases.yaml @@ -0,0 +1,57 @@ +groups: + - name: my-group-name + interval: 30s # defaults to global interval + rules: + - &highalert + alert: &alertname HighAlert + expr: &expr | + sum without(instance) (rate(errors_total[5m])) + / + sum without(instance) (rate(requests_total[5m])) + for: 5m + labels: + severity: &severity critical + annotations: + description: &description "stuff's happening with {{ $.labels.service }}" + + # Mix recording rules in the same list + - record: &recordname "new_metric" + expr: *expr + labels: + abc: edf + uvw: xyz + + - alert: *alertname + expr: *expr + for: 5m + labels: + severity: *severity + annotations: + description: *description + + - <<: *highalert + alert: HighAlert2 + + - name: my-another-name + interval: 30s # defaults to global interval + rules: + - alert: *alertname + expr: *expr + for: 5m + labels: + severity: *severity + 
annotations: + description: *description + + - record: *recordname + expr: *expr + + - alert: *alertname + expr: *expr + labels: + severity: *severity + annotations: + description: *description + + - <<: *highalert + alert: HighAlert2 diff --git a/model/rulefmt/testdata/bad_annotation.bad.yaml b/model/rulefmt/testdata/utf-8_annotation.good.yaml similarity index 100% rename from model/rulefmt/testdata/bad_annotation.bad.yaml rename to model/rulefmt/testdata/utf-8_annotation.good.yaml diff --git a/model/rulefmt/testdata/bad_lname.bad.yaml b/model/rulefmt/testdata/utf-8_lname.good.yaml similarity index 100% rename from model/rulefmt/testdata/bad_lname.bad.yaml rename to model/rulefmt/testdata/utf-8_lname.good.yaml diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go new file mode 100644 index 0000000000..e15a6b3846 --- /dev/null +++ b/model/textparse/benchmark_test.go @@ -0,0 +1,340 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" +) + +// The BenchmarkParse... set of benchmarks analyzes the efficiency of parsing various +// datasets with different parsers. It mimics how scrape/scrape.go#append uses parsers +// and allows comparison with expfmt decoders where applicable. +// +// NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated +// Series, Series+Metrics with and without reuse, Series+CT. Those cases are sometimes +// good to know if you are working on a certain optimization, but it does not +// make sense to persist such cases for everybody (e.g. for CI one day). +// For local iteration, feel free to adjust cases/comment out code etc. +// +// NOTE(bwplotka): Those benchmarks are purposefully categorized per data set, +// to avoid the temptation to assess "what parser (OM, proto, prom) is the fastest, +// in general" here, since not every parser supports every data set type. +// Use scrape.BenchmarkScrapeLoopAppend if you want one benchmark comparing parsers fairly. + +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParsePromText' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParsePromText(b *testing.B) { + data := readTestdataFile(b, "alltypes.237mfs.prom.txt") + + for _, parser := range []string{ + "promtext", + "omtext", // Compare how omtext parser deals with Prometheus text format. + "expfmt-promtext", + } { + b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) { + if strings.HasPrefix(parser, "expfmt-") { + benchExpFmt(b, data, parser) + } else { + benchParse(b, data, parser) + } + }) + } +} + +/* + export bench=v1 && go test ./model/textparse/... 
\ + -run '^$' -bench '^BenchmarkParsePromText_NoMeta' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParsePromText_NoMeta(b *testing.B) { + data := readTestdataFile(b, "alltypes.237mfs.nometa.prom.txt") + + for _, parser := range []string{ + "promtext", + "expfmt-promtext", + } { + b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) { + if strings.HasPrefix(parser, "expfmt-") { + benchExpFmt(b, data, parser) + } else { + benchParse(b, data, parser) + } + }) + } +} + +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParseOMText' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParseOMText(b *testing.B) { + data := readTestdataFile(b, "alltypes.5mfs.om.txt") + // TODO(bwplotka): Add comparison with expfmt.TypeOpenMetrics once expfmt + // supports OM exemplars, see https://github.com/prometheus/common/issues/703. + benchParse(b, data, "omtext") +} + +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParsePromProto' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParsePromProto(b *testing.B) { + data := createTestProtoBuf(b).Bytes() + // TODO(bwplotka): Add comparison with expfmt.TypeProtoDelim once expfmt + // supports GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. + benchParse(b, data, "promproto") +} + +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParseOpenMetricsNHCB' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParseOpenMetricsNHCB(b *testing.B) { + data := readTestdataFile(b, "1histogram.om.txt") + + for _, parser := range []string{ + "omtext", // Measure OM parser baseline for histograms. + "omtext_with_nhcb", // Measure NHCB over OM parser. 
+ } { + b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) { + benchParse(b, data, parser) + }) + } +} + +func benchParse(b *testing.B, data []byte, parser string) { + type newParser func([]byte, *labels.SymbolTable) Parser + + var newParserFn newParser + switch parser { + case "promtext": + newParserFn = func(b []byte, st *labels.SymbolTable) Parser { + return NewPromParser(b, st, false) + } + case "promproto": + newParserFn = func(b []byte, st *labels.SymbolTable) Parser { + return NewProtobufParser(b, true, false, st) + } + case "omtext": + newParserFn = func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + } + case "omtext_with_nhcb": + newParserFn = func(b []byte, st *labels.SymbolTable) Parser { + p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + return NewNHCBParser(p, st, false) + } + default: + b.Fatal("unknown parser", parser) + } + + var ( + res labels.Labels + e exemplar.Exemplar + ) + + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i++ { + p := newParserFn(data, st) + + Inner: + for { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Inner + } + b.Fatal(err) + case EntryType: + _, _ = p.Type() + continue + case EntryHelp: + _, _ = p.Help() + continue + case EntryUnit: + _, _ = p.Unit() + continue + case EntryComment: + continue + case EntryHistogram: + _, _, _, _ = p.Histogram() + case EntrySeries: + _, _, _ = p.Series() + default: + b.Fatal("not implemented entry", t) + } + + p.Labels(&res) + _ = p.CreatedTimestamp() + for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { + } + } + } +} + +func benchExpFmt(b *testing.B, data []byte, expFormatTypeStr string) { + expfmtFormatType := expfmt.TypeUnknown + switch expFormatTypeStr { + case "expfmt-promtext": + expfmtFormatType = expfmt.TypeProtoText + case "expfmt-promproto": + expfmtFormatType = expfmt.TypeProtoDelim + case "expfmt-omtext": + expfmtFormatType = expfmt.TypeOpenMetrics + default: + b.Fatal("unknown expfmt format type", expFormatTypeStr) + } + + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + decSamples := make(model.Vector, 0, 50) + sdec := expfmt.SampleDecoder{ + Dec: expfmt.NewDecoder(bytes.NewReader(data), expfmt.NewFormat(expfmtFormatType)), + Opts: &expfmt.DecodeOptions{ + Timestamp: model.TimeFromUnixNano(0), + }, + } + + for { + if err := sdec.Decode(&decSamples); err != nil { + if errors.Is(err, io.EOF) { + break + } + b.Fatal(err) + } + decSamples = decSamples[:0] + } + } +} + +func readTestdataFile(tb testing.TB, file string) []byte { + tb.Helper() + + f, err := os.Open(filepath.Join("testdata", file)) + require.NoError(tb, err) + + tb.Cleanup(func() { + _ = f.Close() + }) + buf, err := io.ReadAll(f) + require.NoError(tb, err) + return buf +} + +/* + export bench=v1 && go test ./model/textparse/... 
\ + -run '^$' -bench '^BenchmarkCreatedTimestampPromProto' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkCreatedTimestampPromProto(b *testing.B) { + data := createTestProtoBuf(b).Bytes() + + st := labels.NewSymbolTable() + p := NewProtobufParser(data, true, false, st) + + found := false +Inner: + for { + t, err := p.Next() + switch t { + case EntryInvalid: + b.Fatal(err) + case EntryType: + m, _ := p.Type() + if string(m) == "go_memstats_alloc_bytes_total" { + found = true + break Inner + } + // Parser impl requires this (bug?) + case EntryHistogram: + _, _, _, _ = p.Histogram() + case EntrySeries: + _, _, _ = p.Series() + } + } + require.True(b, found) + b.Run("case=no-ct", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if p.CreatedTimestamp() != 0 { + b.Fatal("should be 0") + } + } + }) + + found = false +Inner2: + for { + t, err := p.Next() + switch t { + case EntryInvalid: + b.Fatal(err) + case EntryType: + m, _ := p.Type() + if string(m) == "test_counter_with_createdtimestamp" { + found = true + break Inner2 + } + case EntryHistogram: + _, _, _, _ = p.Histogram() + case EntrySeries: + _, _, _ = p.Series() + } + } + require.True(b, found) + b.Run("case=ct", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if p.CreatedTimestamp() == 0 { + b.Fatal("should not be 0") + } + } + }) +} diff --git a/model/textparse/interface.go b/model/textparse/interface.go index df01dbc34f..c97e1f02ee 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -14,6 +14,8 @@ package textparse import ( + "errors" + "fmt" "mime" "github.com/prometheus/common/model" @@ -23,17 +25,22 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -// Parser parses samples from a byte slice of samples in the official -// Prometheus and OpenMetrics text exposition formats. +// Parser parses samples from a byte slice of samples in different exposition formats. type Parser interface { // Series returns the bytes of a series with a simple float64 as a // value, the timestamp if set, and the value of the current sample. + // TODO(bwplotka): Similar to CreatedTimestamp, have ts == 0 meaning no timestamp provided. + // We already accepted in many places (PRW, proto parsing histograms) that 0 timestamp is not + // a valid timestamp. If needed it can be represented as 0+1ms. Series() ([]byte, *int64, float64) // Histogram returns the bytes of a series with a sparse histogram as a // value, the timestamp if set, and the histogram in the current sample. // Depending on the parsed input, the function returns an (integer) Histogram // or a FloatHistogram, with the respective other return value being nil. + // TODO(bwplotka): Similar to CreatedTimestamp, have ts == 0 meaning no timestamp provided. + // We already accepted in many places (PRW, proto parsing histograms) that 0 timestamp is not + // a valid timestamp. If needed it can be represented as 0+1ms. Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) // Help returns the metric name and help text in the current entry. @@ -44,11 +51,13 @@ type Parser interface { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. + // TODO(bwplotka): Once type-and-unit-labels stabilizes we could remove this method. 
Type() ([]byte, model.MetricType) // Unit returns the metric name and unit in the current entry. // Must only be called after Next returned a unit entry. // The returned byte slices become invalid after the next call to Next. + // TODO(bwplotka): Once type-and-unit-labels stabilizes we could remove this method. Unit() ([]byte, []byte) // Comment returns the text of the current comment. @@ -56,9 +65,10 @@ type Parser interface { // The returned byte slice becomes invalid after the next call to Next. Comment() []byte - // Metric writes the labels of the current sample into the passed labels. - // It returns the string from which the metric was parsed. - Metric(l *labels.Labels) string + // Labels writes the labels of the current sample into the passed labels. + // The values of the "le" labels of classic histograms and "quantile" labels + // of summaries should follow the OpenMetrics formatting rules. + Labels(l *labels.Labels) // Exemplar writes the exemplar of the current sample into the passed // exemplar. It can be called repeatedly to retrieve multiple exemplars @@ -67,35 +77,75 @@ type Parser interface { Exemplar(l *exemplar.Exemplar) bool // CreatedTimestamp returns the created timestamp (in milliseconds) for the - // current sample. It returns nil if it is unknown e.g. if it wasn't set, + // current sample. It returns 0 if it is unknown e.g. if it wasn't set or // if the scrape protocol or metric type does not support created timestamps. - CreatedTimestamp() *int64 + CreatedTimestamp() int64 // Next advances the parser to the next sample. // It returns (EntryInvalid, io.EOF) if no samples were read. Next() (Entry, error) } -// New returns a new parser of the byte slice. -// -// This function always returns a valid parser, but might additionally -// return an error if the content type cannot be parsed. -func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) { +// extractMediaType returns the mediaType of a required parser. It tries first to +// extract a valid and supported mediaType from contentType. If that fails, +// the provided fallbackType (possibly an empty string) is returned, together with +// an error. fallbackType is used as-is without further validation. +func extractMediaType(contentType, fallbackType string) (string, error) { if contentType == "" { - return NewPromParser(b, st), nil + if fallbackType == "" { + return "", errors.New("non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target") + } + return fallbackType, fmt.Errorf("non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol %q", fallbackType) } + // We have a contentType, parse it. mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { - return NewPromParser(b, st), err + if fallbackType == "" { + retErr := fmt.Errorf("cannot parse Content-Type %q and no fallback_scrape_protocol for target", contentType) + return "", errors.Join(retErr, err) + } + retErr := fmt.Errorf("could not parse received Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) + return fallbackType, errors.Join(retErr, err) } + + // We have a valid media type, either we recognise it and can use it + // or we have to error. + switch mediaType { + case "application/openmetrics-text", "application/vnd.google.protobuf", "text/plain": + return mediaType, nil + } + // We're here because we have no recognised mediaType. 
+ if fallbackType == "" { + return "", fmt.Errorf("received unsupported Content-Type %q and no fallback_scrape_protocol specified for target", contentType) + } + return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) +} + +// New returns a new parser of the byte slice. +// +// This function no longer guarantees to return a valid parser. +// +// It only returns a valid parser if the supplied contentType and fallbackType allow. +// An error may also be returned if fallbackType had to be used or there was some +// other error parsing the supplied Content-Type. +// If the returned parser is nil then the scrape must fail. +func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries, enableTypeAndUnitLabels bool, st *labels.SymbolTable) (Parser, error) { + mediaType, err := extractMediaType(contentType, fallbackType) + // err may be nil or something we want to warn about. + switch mediaType { case "application/openmetrics-text": - return NewOpenMetricsParser(b, st), nil + return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { + o.skipCTSeries = skipOMCTSeries + o.enableTypeAndUnitLabels = enableTypeAndUnitLabels + }), err case "application/vnd.google.protobuf": - return NewProtobufParser(b, parseClassicHistograms, st), nil + return NewProtobufParser(b, parseClassicHistograms, enableTypeAndUnitLabels, st), err + case "text/plain": + return NewPromParser(b, st, enableTypeAndUnitLabels), err default: - return NewPromParser(b, st), nil + return nil, err } } @@ -106,8 +156,8 @@ const ( EntryInvalid Entry = -1 EntryType Entry = 0 EntryHelp Entry = 1 - EntrySeries Entry = 2 // A series with a simple float64 as value. + EntrySeries Entry = 2 // EntrySeries marks a series with a simple float64 as value. EntryComment Entry = 3 EntryUnit Entry = 4 - EntryHistogram Entry = 5 // A series with a native histogram as a value. + EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value. 
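A minimal usage sketch for the new New signature above; the body and contentType variables and the warn-and-continue handling are assumptions for illustration, not part of this change:

	st := labels.NewSymbolTable()
	fallback := config.PrometheusText0_0_4.HeaderMediaType()
	p, err := textparse.New(body, contentType, fallback, false, false, false, st)
	if p == nil {
		return err // no parser could be selected: per the contract above, the scrape must fail
	}
	if err != nil {
		// Parser is valid but the fallback was used; callers likely want to log a warning.
	}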
) diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index c644565628..3034fdade1 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -14,16 +14,29 @@ package textparse import ( + "errors" + "io" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/testutil" ) func TestNewParser(t *testing.T) { t.Parallel() + requireNilParser := func(t *testing.T, p Parser) { + require.Nil(t, p) + } + requirePromParser := func(t *testing.T, p Parser) { require.NotNil(t, p) _, ok := p.(*PromParser) @@ -36,34 +49,83 @@ func TestNewParser(t *testing.T) { require.True(t, ok) } + requireProtobufParser := func(t *testing.T, p Parser) { + require.NotNil(t, p) + _, ok := p.(*ProtobufParser) + require.True(t, ok) + } + for name, tt := range map[string]*struct { - contentType string - validateParser func(*testing.T, Parser) - err string + contentType string + fallbackScrapeProtocol config.ScrapeProtocol + validateParser func(*testing.T, Parser) + err string }{ "empty-string": { - validateParser: requirePromParser, + validateParser: requireNilParser, + err: "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target", + }, + "empty-string-fallback-text-plain": { + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText0_0_4, + err: "non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol \"text/plain\"", }, "invalid-content-type-1": { contentType: "invalid/", - validateParser: requirePromParser, + validateParser: requireNilParser, err: "expected token after slash", }, + "invalid-content-type-1-fallback-text-plain": { + contentType: "invalid/", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText0_0_4, + err: "expected token after slash", + }, + "invalid-content-type-1-fallback-openmetrics": { + contentType: "invalid/", + validateParser: requireOpenMetricsParser, + fallbackScrapeProtocol: config.OpenMetricsText0_0_1, + err: "expected token after slash", + }, + "invalid-content-type-1-fallback-protobuf": { + contentType: "invalid/", + validateParser: requireProtobufParser, + fallbackScrapeProtocol: config.PrometheusProto, + err: "expected token after slash", + }, "invalid-content-type-2": { contentType: "invalid/invalid/invalid", - validateParser: requirePromParser, + validateParser: requireNilParser, err: "unexpected content after media subtype", }, + "invalid-content-type-2-fallback-text-plain": { + contentType: "invalid/invalid/invalid", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText1_0_0, + err: "unexpected content after media subtype", + }, "invalid-content-type-3": { contentType: "/", - validateParser: requirePromParser, + validateParser: requireNilParser, err: "no media type", }, + "invalid-content-type-3-fallback-text-plain": { + contentType: "/", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText1_0_0, + err: "no media type", + }, "invalid-content-type-4": { contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8", - validateParser: requirePromParser, + validateParser: 
requireNilParser, err: "duplicate parameter name", }, + "invalid-content-type-4-fallback-open-metrics": { + contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8", + validateParser: requireOpenMetricsParser, + fallbackScrapeProtocol: config.OpenMetricsText1_0_0, + err: "duplicate parameter name", + }, "openmetrics": { contentType: "application/openmetrics-text", validateParser: requireOpenMetricsParser, @@ -80,27 +142,142 @@ func TestNewParser(t *testing.T) { contentType: "text/plain", validateParser: requirePromParser, }, + "protobuf": { + contentType: "application/vnd.google.protobuf", + validateParser: requireProtobufParser, + }, "plain-text-with-version": { contentType: "text/plain; version=0.0.4", validateParser: requirePromParser, }, "some-other-valid-content-type": { contentType: "text/html", - validateParser: requirePromParser, + validateParser: requireNilParser, + err: "received unsupported Content-Type \"text/html\" and no fallback_scrape_protocol specified for target", + }, + "some-other-valid-content-type-fallback-text-plain": { + contentType: "text/html", + validateParser: requirePromParser, + fallbackScrapeProtocol: config.PrometheusText0_0_4, + err: "received unsupported Content-Type \"text/html\", using fallback_scrape_protocol \"text/plain\"", }, } { t.Run(name, func(t *testing.T) { tt := tt // Copy to local variable before going parallel. t.Parallel() - p, err := New([]byte{}, tt.contentType, false, labels.NewSymbolTable()) + fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType() + + p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, false, labels.NewSymbolTable()) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) } else { - require.Error(t, err) - require.Contains(t, err.Error(), tt.err) + require.ErrorContains(t, err, tt.err) } }) } } + +// parsedEntry represents data that is parsed for each entry. +type parsedEntry struct { + // In all but EntryComment, EntryInvalid. + m string + + // In EntryHistogram. + shs *histogram.Histogram + fhs *histogram.FloatHistogram + + // In EntrySeries. + v float64 + + // In EntrySeries and EntryHistogram. + lset labels.Labels + t *int64 + es []exemplar.Exemplar + ct int64 + + // In EntryType. + typ model.MetricType + // In EntryHelp. + help string + // In EntryUnit. + unit string + // In EntryComment. + comment string +} + +func requireEntries(t *testing.T, exp, got []parsedEntry) { + t.Helper() + + testutil.RequireEqualWithOptions(t, exp, got, []cmp.Option{ + // We reuse slices so we sometimes have empty vs nil differences + // we need to ignore with cmpopts.EquateEmpty(). + // However we have to filter out labels, as only + // one comparer per type has to be specified, + // and RequireEqualWithOptions uses + // cmp.Comparer(labels.Equal). 
+ cmp.FilterValues(func(x, y any) bool { + _, xIsLabels := x.(labels.Labels) + _, yIsLabels := y.(labels.Labels) + return !xIsLabels && !yIsLabels + }, cmpopts.EquateEmpty()), + cmp.AllowUnexported(parsedEntry{}), + }) +} + +func testParse(t *testing.T, p Parser) (ret []parsedEntry) { + t.Helper() + + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + var got parsedEntry + var m []byte + switch et { + case EntryInvalid: + t.Fatal("entry invalid not expected") + case EntrySeries, EntryHistogram: + var ts *int64 + if et == EntrySeries { + m, ts, got.v = p.Series() + } else { + m, ts, got.shs, got.fhs = p.Histogram() + } + if ts != nil { + // TODO(bwplotka): Change to 0 in the interface for set check to + // avoid pointer mangling. + got.t = int64p(*ts) + } + got.m = string(m) + p.Labels(&got.lset) + got.ct = p.CreatedTimestamp() + + for e := (exemplar.Exemplar{}); p.Exemplar(&e); { + got.es = append(got.es, e) + } + case EntryType: + m, got.typ = p.Type() + got.m = string(m) + + case EntryHelp: + m, h := p.Help() + got.m = string(m) + got.help = string(h) + + case EntryUnit: + m, u := p.Unit() + got.m = string(m) + got.unit = string(u) + + case EntryComment: + got.comment = string(p.Comment()) + } + ret = append(ret, got) + } + return ret +} diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go new file mode 100644 index 0000000000..e7cfcc028e --- /dev/null +++ b/model/textparse/nhcbparse.go @@ -0,0 +1,389 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "errors" + "io" + "math" + "strconv" + "strings" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/convertnhcb" +) + +type collectionState int + +const ( + stateStart collectionState = iota + stateCollecting + stateEmitting + stateInhibiting // Inhibiting NHCB, because there was an exponential histogram with the same labels. +) + +// The NHCBParser wraps a Parser and converts classic histograms to native +// histograms with custom buckets. +// +// Since Parser interface is line based, this parser needs to keep track +// of the last classic histogram series it saw to collate them into a +// single native histogram. +// +// Note: +// - Only series that have the histogram metadata type are considered for +// conversion. +// - The classic series are also returned if keepClassicHistograms is true. +type NHCBParser struct { + // The parser we're wrapping. + parser Parser + // Option to keep classic histograms along with converted histograms. + keepClassicHistograms bool + + // Labels builder. + builder labels.ScratchBuilder + + // State of the parser. + state collectionState + + // Caches the values from the underlying parser. + // For Series and Histogram. 
+ bytes []byte + ts *int64 + value float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + // For Metric. + lset labels.Labels + // For Type. + bName []byte + typ model.MetricType + + // Caches the entry itself if we are inserting a converted NHCB + // halfway through. + entry Entry + err error + + // Caches the values and metric for the inserted converted NHCB. + bytesNHCB []byte + hNHCB *histogram.Histogram + fhNHCB *histogram.FloatHistogram + lsetNHCB labels.Labels + exemplars []exemplar.Exemplar + ctNHCB int64 + metricStringNHCB string + + // Collates values from the classic histogram series to build + // the converted histogram later. + tempLsetNHCB labels.Labels + tempNHCB convertnhcb.TempHistogram + tempExemplars []exemplar.Exemplar + tempExemplarCount int + tempCT int64 + + // Remembers the last base histogram metric name (assuming it's + // a classic histogram) so we can tell if the next float series + // is part of the same classic histogram. + lastHistogramName string + lastHistogramLabelsHash uint64 + // Reused buffer for hashing labels. + hBuffer []byte +} + +func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser { + return &NHCBParser{ + parser: p, + keepClassicHistograms: keepClassicHistograms, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + tempNHCB: convertnhcb.NewTempHistogram(), + } +} + +func (p *NHCBParser) Series() ([]byte, *int64, float64) { + return p.bytes, p.ts, p.value +} + +func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + if p.state == stateEmitting { + return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB + } + return p.bytes, p.ts, p.h, p.fh +} + +func (p *NHCBParser) Help() ([]byte, []byte) { + return p.parser.Help() +} + +func (p *NHCBParser) Type() ([]byte, model.MetricType) { + return p.bName, p.typ +} + +func (p *NHCBParser) Unit() ([]byte, []byte) { + return p.parser.Unit() +} + +func (p *NHCBParser) Comment() []byte { + return p.parser.Comment() +} + +func (p *NHCBParser) Labels(l *labels.Labels) { + if p.state == stateEmitting { + *l = p.lsetNHCB + return + } + *l = p.lset +} + +func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.state == stateEmitting { + if len(p.exemplars) == 0 { + return false + } + *ex = p.exemplars[0] + p.exemplars = p.exemplars[1:] + return true + } + return p.parser.Exemplar(ex) +} + +func (p *NHCBParser) CreatedTimestamp() int64 { + switch p.state { + case stateStart, stateInhibiting: + if p.entry == EntrySeries || p.entry == EntryHistogram { + return p.parser.CreatedTimestamp() + } + case stateCollecting: + return p.tempCT + case stateEmitting: + return p.ctNHCB + } + return 0 +} + +func (p *NHCBParser) Next() (Entry, error) { + for { + if p.state == stateEmitting { + p.state = stateStart + if p.entry == EntrySeries { + isNHCB := p.handleClassicHistogramSeries(p.lset) + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. 
+ continue + } + } + return p.entry, p.err + } + + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { + return EntryHistogram, nil + } + return EntryInvalid, p.err + } + switch p.entry { + case EntrySeries: + p.bytes, p.ts, p.value = p.parser.Series() + p.parser.Labels(&p.lset) + var isNHCB bool + switch p.state { + case stateCollecting: + if p.differentMetric() && p.processNHCB() { + // We are collecting classic series, but the next series + // has different type or labels. If we can convert what + // we have collected so far to NHCB, then we can return it. + return EntryHistogram, nil + } + isNHCB = p.handleClassicHistogramSeries(p.lset) + case stateInhibiting: + if p.differentMetric() { + // Next has different labels than the previous exponential + // histogram so we can start collecting classic histogram + // series. + p.state = stateStart + isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Next has the same labels as the previous exponential + // histogram, so we are still in the inhibiting state and + // we should not convert to NHCB. + isNHCB = false + } + case stateStart: + isNHCB = p.handleClassicHistogramSeries(p.lset) + default: + // This should not happen. + return EntryInvalid, errors.New("unexpected state in NHCBParser") + } + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + continue + } + return p.entry, p.err + case EntryHistogram: + p.state = stateInhibiting + p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() + p.parser.Labels(&p.lset) + p.storeExponentialLabels() + case EntryType: + p.bName, p.typ = p.parser.Type() + } + if p.processNHCB() { + return EntryHistogram, nil + } + return p.entry, p.err + } +} + +// Return true if labels have changed and we should emit the NHCB. +func (p *NHCBParser) differentMetric() bool { + if p.typ != model.MetricTypeHistogram { + // Different metric type. + return true + } + _, name := convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) + if p.lastHistogramName != name { + // Different metric name. + return true + } + nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + // Different label values. + return p.lastHistogramLabelsHash != nextHash +} + +// Save the label set of the classic histogram without suffix and bucket `le` label. +func (p *NHCBParser) storeClassicLabels(name string) { + p.lastHistogramName = name + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) +} + +func (p *NHCBParser) storeExponentialLabels() { + p.lastHistogramName = p.lset.Get(labels.MetricName) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer) +} + +// handleClassicHistogramSeries collates the classic histogram series to be converted to NHCB +// if it is actually a classic histogram series (and not a normal float series) and if there +// isn't already a native histogram with the same name (assuming it is always processed +// right before the classic histograms) and returns true if the collation was done. +func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { + if p.typ != model.MetricTypeHistogram { + return false + } + mName := lset.Get(labels.MetricName) + // Sanity check to ensure that the TYPE metadata entry name is the same as the base name. 
+ suffixType, name := convertnhcb.GetHistogramMetricBaseName(mName) + if name != string(p.bName) { + return false + } + switch suffixType { + case convertnhcb.SuffixBucket: + if !lset.Has(labels.BucketLabel) { + // This should not really happen. + return false + } + le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64) + if err == nil && !math.IsNaN(le) { + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { + _ = hist.SetBucketCount(le, p.value) + }) + return true + } + case convertnhcb.SuffixCount: + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { + _ = hist.SetCount(p.value) + }) + return true + case convertnhcb.SuffixSum: + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { + _ = hist.SetSum(p.value) + }) + return true + } + return false +} + +func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, name string, updateHist func(*convertnhcb.TempHistogram)) { + if p.state != stateCollecting { + p.storeClassicLabels(name) + p.tempCT = p.parser.CreatedTimestamp() + p.state = stateCollecting + p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, name) + } + p.storeExemplars() + updateHist(&p.tempNHCB) +} + +func (p *NHCBParser) storeExemplars() { + for ex := p.nextExemplarPtr(); p.parser.Exemplar(ex); ex = p.nextExemplarPtr() { + p.tempExemplarCount++ + } +} + +func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar { + switch { + case p.tempExemplarCount == len(p.tempExemplars)-1: + // Reuse the previously allocated exemplar, it was not filled up. + case len(p.tempExemplars) == cap(p.tempExemplars): + // Let the runtime grow the slice. + p.tempExemplars = append(p.tempExemplars, exemplar.Exemplar{}) + default: + // Take the next element into use. + p.tempExemplars = p.tempExemplars[:len(p.tempExemplars)+1] + } + return &p.tempExemplars[len(p.tempExemplars)-1] +} + +func (p *NHCBParser) swapExemplars() { + p.exemplars = p.tempExemplars[:p.tempExemplarCount] + p.tempExemplars = p.tempExemplars[:0] +} + +// processNHCB converts the collated classic histogram series to NHCB and caches the info +// to be returned to callers. Returns true if the conversion was successful. +func (p *NHCBParser) processNHCB() bool { + if p.state != stateCollecting { + return false + } + h, fh, err := p.tempNHCB.Convert() + if err == nil { + if h != nil { + if err := h.Validate(); err != nil { + return false + } + p.hNHCB = h + p.fhNHCB = nil + } else if fh != nil { + if err := fh.Validate(); err != nil { + return false + } + p.hNHCB = nil + p.fhNHCB = fh + } + p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",") + p.bytesNHCB = []byte(p.metricStringNHCB) + p.lsetNHCB = p.tempLsetNHCB + p.swapExemplars() + p.ctNHCB = p.tempCT + p.state = stateEmitting + } else { + p.state = stateStart + } + p.tempNHCB.Reset() + p.tempExemplarCount = 0 + p.tempCT = 0 + return err == nil +} diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go new file mode 100644 index 0000000000..61ef1df2b1 --- /dev/null +++ b/model/textparse/nhcbparse_test.go @@ -0,0 +1,1128 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "bytes" + "encoding/binary" + "strconv" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" +) + +func TestNHCBParserOnOMParser(t *testing.T) { + // The input is taken originally from TestOpenMetricsParse, with additional tests for the NHCBParser. + + input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +# UNIT go_gc_duration_seconds seconds +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05 +# HELP nohelp1 +# HELP help2 escape \ \n \\ \" \x chars +# UNIT nounit +go_gc_duration_seconds{quantile="1.0",a="b"} 8.3835e-05 +go_gc_duration_seconds_count 99 +some:aggregate:rate5m{a_b="c"} 1 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 33 123.123 +# TYPE hh histogram +hh_bucket{le="+Inf"} 1 +# TYPE gh gaugehistogram +gh_bucket{le="+Inf"} 1 +# TYPE hhh histogram +hhh_bucket{le="+Inf"} 1 # {id="histogram-bucket-test"} 4 +hhh_count 1 # {id="histogram-count-test"} 4 +# TYPE ggh gaugehistogram +ggh_bucket{le="+Inf"} 1 # {id="gaugehistogram-bucket-test",xx="yy"} 4 123.123 +ggh_count 1 # {id="gaugehistogram-count-test",xx="yy"} 4 123.123 +# TYPE smr_seconds summary +smr_seconds_count 2.0 # {id="summary-count-test"} 1 123.321 +smr_seconds_sum 42.0 # {id="summary-sum-test"} 1 123.321 +# TYPE ii info +ii{foo="bar"} 1 +# TYPE ss stateset +ss{ss="foo"} 1 +ss{ss="bar"} 0 +ss{A="a"} 0 +# TYPE un unknown +_metric_starting_with_underscore 1 +testmetric{_label_starting_with_underscore="foo"} 1 +testmetric{label="\"bar\""} 1 +# HELP foo Counter with and without labels to certify CT is parsed for both cases +# TYPE foo counter +foo_total 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created 1520872607.123 +foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created{a="b"} 1520872607.123 +# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far +# TYPE bar summary +bar_count 17.0 +bar_sum 324789.3 +bar{quantile="0.95"} 123.7 +bar{quantile="0.99"} 150.0 +bar_created 1520872608.124 +# HELP baz Histogram with the same objective as above's summary +# TYPE baz histogram +baz_bucket{le="0.0"} 0 +baz_bucket{le="+Inf"} 17 +baz_count 17 +baz_sum 324789.3 +baz_created 1520872609.125 +# HELP fizz_created Gauge which shouldn't be parsed as CT +# TYPE fizz_created gauge +fizz_created 17.0 +# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 1 +something_bucket{le="+Inf"} 18 +something_count{a="b"} 9 +something_sum{a="b"} 42123.0 
+something_bucket{a="b",le="0.0"} 8 +something_bucket{a="b",le="+Inf"} 9 +something_created{a="b"} 1520430002 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 20 +yum_sum 324789.5 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_count 21 +foobar_created 1520430004 +foobar_sum 324789.6 +foobar{quantile="0.95"} 123.8 +foobar{quantile="0.99"} 150.1` + + input += "\n# HELP metric foo\x00bar" + input += "\nnull_byte_metric{a=\"abc\x00\"} 1" + input += "\n# EOF\n" + + exp := []parsedEntry{ + { + m: "go_gc_duration_seconds", + help: "A summary of the GC invocation durations.", + }, { + m: "go_gc_duration_seconds", + typ: model.MetricTypeSummary, + }, { + m: "go_gc_duration_seconds", + unit: "seconds", + }, { + m: `go_gc_duration_seconds{quantile="0"}`, + v: 4.9351e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), + }, { + m: `go_gc_duration_seconds{quantile="0.25"}`, + v: 7.424100000000001e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"), + }, { + m: `go_gc_duration_seconds{quantile="0.5",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"), + }, { + m: "nohelp1", + help: "", + }, { + m: "help2", + help: "escape \\ \n \\ \" \\x chars", + }, { + m: "nounit", + unit: "", + }, { + m: `go_gc_duration_seconds{quantile="1.0",a="b"}`, + v: 8.3835e-05, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), + }, { + m: `go_gc_duration_seconds_count`, + v: 99, + lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"), + }, { + m: `some:aggregate:rate5m{a_b="c"}`, + v: 1, + lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), + }, { + m: "go_goroutines", + help: "Number of goroutines that currently exist.", + }, { + m: "go_goroutines", + typ: model.MetricTypeGauge, + }, { + m: `go_goroutines`, + v: 33, + t: int64p(123123), + lset: labels.FromStrings("__name__", "go_goroutines"), + }, { + m: "hh", + typ: model.MetricTypeHistogram, + }, { + m: `hh{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1, + Sum: 0.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + // Custom values are empty as we do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "hh"), + }, { + m: "gh", + typ: model.MetricTypeGaugeHistogram, + }, { + m: `gh_bucket{le="+Inf"}`, + v: 1, + lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"), + }, { + m: "hhh", + typ: model.MetricTypeHistogram, + }, { + m: `hhh{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1, + Sum: 0.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + // Custom values are empty as we do not store the +Inf boundary. 
+ }, + lset: labels.FromStrings("__name__", "hhh"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + {Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, + }, + }, { + m: "ggh", + typ: model.MetricTypeGaugeHistogram, + }, { + m: `ggh_bucket{le="+Inf"}`, + v: 1, + lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}}, + }, { + m: `ggh_count`, + v: 1, + lset: labels.FromStrings("__name__", "ggh_count"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}}, + }, { + m: "smr_seconds", + typ: model.MetricTypeSummary, + }, { + m: `smr_seconds_count`, + v: 2, + lset: labels.FromStrings("__name__", "smr_seconds_count"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}}, + }, { + m: `smr_seconds_sum`, + v: 42, + lset: labels.FromStrings("__name__", "smr_seconds_sum"), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}}, + }, { + m: "ii", + typ: model.MetricTypeInfo, + }, { + m: `ii{foo="bar"}`, + v: 1, + lset: labels.FromStrings("__name__", "ii", "foo", "bar"), + }, { + m: "ss", + typ: model.MetricTypeStateset, + }, { + m: `ss{ss="foo"}`, + v: 1, + lset: labels.FromStrings("__name__", "ss", "ss", "foo"), + }, { + m: `ss{ss="bar"}`, + v: 0, + lset: labels.FromStrings("__name__", "ss", "ss", "bar"), + }, { + m: `ss{A="a"}`, + v: 0, + lset: labels.FromStrings("A", "a", "__name__", "ss"), + }, { + m: "un", + typ: model.MetricTypeUnknown, + }, { + m: "_metric_starting_with_underscore", + v: 1, + lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"), + }, { + m: "testmetric{_label_starting_with_underscore=\"foo\"}", + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), + }, { + m: "testmetric{label=\"\\\"bar\\\"\"}", + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: "foo", + help: "Counter with and without labels to certify CT is parsed for both cases", + }, { + m: "foo", + typ: model.MetricTypeCounter, + }, { + m: "foo_total", + v: 17, + lset: labels.FromStrings("__name__", "foo_total"), + t: int64p(1520879607789), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}}, + ct: 1520872607123, + }, { + m: `foo_total{a="b"}`, + v: 17.0, + lset: labels.FromStrings("__name__", "foo_total", "a", "b"), + t: int64p(1520879607789), + es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}}, + ct: 1520872607123, + }, { + m: "bar", + help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", + }, { + m: "bar", + typ: model.MetricTypeSummary, + }, { + m: "bar_count", + v: 17.0, + lset: labels.FromStrings("__name__", "bar_count"), + ct: 1520872608124, + }, { + m: "bar_sum", + v: 324789.3, + lset: labels.FromStrings("__name__", "bar_sum"), + ct: 1520872608124, + }, { + m: `bar{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), + ct: 1520872608124, + }, { + m: `bar{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), + ct: 1520872608124, + }, { + m: "baz", + help: "Histogram with the same 
objective as above's summary", + }, { + m: "baz", + typ: model.MetricTypeHistogram, + }, { + m: `baz{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 17, + Sum: 324789.3, + PositiveSpans: []histogram.Span{{Offset: 1, Length: 1}}, // The first bucket has 0 count so we don't store it and Offset is 1. + PositiveBuckets: []int64{17}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "baz"), + ct: 1520872609125, + }, { + m: "fizz_created", + help: "Gauge which shouldn't be parsed as CT", + }, { + m: "fizz_created", + typ: model.MetricTypeGauge, + }, { + m: `fizz_created`, + v: 17, + lset: labels.FromStrings("__name__", "fizz_created"), + }, { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 18, + Sum: 324789.4, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{1, 16}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "something"), + ct: 1520430001000, + }, { + m: `something{a="b"}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 42123.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{8, -7}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "something", "a", "b"), + ct: 1520430002000, + }, { + m: "yum", + help: "Summary with _created between sum and quantiles", + }, { + m: "yum", + typ: model.MetricTypeSummary, + }, { + m: `yum_count`, + v: 20, + lset: labels.FromStrings("__name__", "yum_count"), + ct: 1520430003000, + }, { + m: `yum_sum`, + v: 324789.5, + lset: labels.FromStrings("__name__", "yum_sum"), + ct: 1520430003000, + }, { + m: `yum{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"), + ct: 1520430003000, + }, { + m: `yum{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"), + ct: 1520430003000, + }, { + m: "foobar", + help: "Summary with _created as the first line", + }, { + m: "foobar", + typ: model.MetricTypeSummary, + }, { + m: `foobar_count`, + v: 21, + lset: labels.FromStrings("__name__", "foobar_count"), + ct: 1520430004000, + }, { + m: `foobar_sum`, + v: 324789.6, + lset: labels.FromStrings("__name__", "foobar_sum"), + ct: 1520430004000, + }, { + m: `foobar{quantile="0.95"}`, + v: 123.8, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"), + ct: 1520430004000, + }, { + m: `foobar{quantile="0.99"}`, + v: 150.1, + lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"), + ct: 1520430004000, + }, { + m: "metric", + help: "foo\x00bar", + }, { + m: "null_byte_metric{a=\"abc\x00\"}", + v: 1, + lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + p = NewNHCBParser(p, labels.NewSymbolTable(), false) + got := testParse(t, p) + requireEntries(t, exp, got) +} + +func TestNHCBParserOMParser_MultipleHistograms(t *testing.T) { + // The input is taken originally from TestOpenMetricsParse, with additional tests for the NHCBParser. 
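	// A minimal sketch (illustrative only, assuming a "math" import) of how
	// collated classic series such as `baz` from the previous test become one
	// NHCB via the convertnhcb API used by processClassicHistogramSeries:
	//
	//	th := convertnhcb.NewTempHistogram()
	//	_ = th.SetBucketCount(0.0, 0)          // baz_bucket{le="0.0"} 0
	//	_ = th.SetBucketCount(math.Inf(1), 17) // baz_bucket{le="+Inf"} 17
	//	_ = th.SetCount(17)                    // baz_count 17
	//	_ = th.SetSum(324789.3)                // baz_sum 324789.3
	//	h, fh, err := th.Convert()             // integer or float NHCB, as in processNHCB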
+ + input := `# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_bucket{le="0.0"} 1 # {id="something-test"} -2.0 +something_bucket{le="1.0"} 16 # {id="something-test"} 0.5 +something_bucket{le="+Inf"} 18 # {id="something-test"} 8 +something_count{a="b"} 9 +something_sum{a="b"} 42123.0 +something_bucket{a="b",le="0.0"} 8 # {id="something-test"} 0.0 123.321 +something_bucket{a="b",le="1.0"} 8 +something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 +# EOF +` + + exp := []parsedEntry{ + { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something{}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 18, + Sum: 324789.4, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{1, 14, -13}, + CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "something"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "something-test"), Value: -2.0}, + {Labels: labels.FromStrings("id", "something-test"), Value: 0.5}, + {Labels: labels.FromStrings("id", "something-test"), Value: 8.0}, + }, + }, { + m: `something{a="b"}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 42123.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{8, -8, 1}, + CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "something", "a", "b"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "something-test"), Value: 0.0, HasTs: true, Ts: 123321}, + {Labels: labels.FromStrings("id", "something-test"), Value: 2e100, HasTs: true, Ts: 123000}, + }, + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + + p = NewNHCBParser(p, labels.NewSymbolTable(), false) + got := testParse(t, p) + requireEntries(t, exp, got) +} + +// Verify the requirement tables from +// https://github.com/prometheus/prometheus/issues/13532 . +// "classic" means the option "always_scrape_classic_histograms". +// "nhcb" means the option "convert_classic_histograms_to_nhcb". +// +// Case 1. Only classic histogram is exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | YES | NO | NO |. +// | classic=true, nhcb=false | YES | NO | NO |. +// | classic=false, nhcb=true | NO | NO | YES |. +// | classic=true, nhcb=true | YES | NO | YES |. +// +// Case 2. Both classic and exponential histograms are exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | NO | YES | NO |. +// | classic=true, nhcb=false | YES | YES | NO |. +// | classic=false, nhcb=true | NO | YES | NO |. +// | classic=true, nhcb=true | YES | YES | NO |. +// +// Case 3. Only exponential histogram is exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | NO | YES | NO |. +// | classic=true, nhcb=false | NO | YES | NO |. +// | classic=false, nhcb=true | NO | YES | NO |. +// | classic=true, nhcb=true | NO | YES | NO |. 
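+// Editorial note, not part of the original change: in the test below,
+// "classic" maps to the keep-classic argument of the underlying parser and
+// "nhcb" maps to wrapping it in an NHCBParser, mirroring the scrape options
+// named above. Roughly:
+//
+//	p := NewProtobufParser(buf, classic, false, labels.NewSymbolTable())
+//	if nhcb {
+//		p = NewNHCBParser(p, labels.NewSymbolTable(), classic)
+//	}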
+func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
+	type requirement struct {
+		expectClassic     bool
+		expectExponential bool
+		expectNHCB        bool
+	}
+
+	cases := []map[string]requirement{
+		// Case 1.
+		{
+			"classic=false, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false},
+			"classic=true, nhcb=false":  {expectClassic: true, expectExponential: false, expectNHCB: false},
+			"classic=false, nhcb=true":  {expectClassic: false, expectExponential: false, expectNHCB: true},
+			"classic=true, nhcb=true":   {expectClassic: true, expectExponential: false, expectNHCB: true},
+		},
+		// Case 2.
+		{
+			"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
+			"classic=true, nhcb=false":  {expectClassic: true, expectExponential: true, expectNHCB: false},
+			"classic=false, nhcb=true":  {expectClassic: false, expectExponential: true, expectNHCB: false},
+			"classic=true, nhcb=true":   {expectClassic: true, expectExponential: true, expectNHCB: false},
+		},
+		// Case 3.
+		{
+			"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
+			"classic=true, nhcb=false":  {expectClassic: false, expectExponential: true, expectNHCB: false},
+			"classic=false, nhcb=true":  {expectClassic: false, expectExponential: true, expectNHCB: false},
+			"classic=true, nhcb=true":   {expectClassic: false, expectExponential: true, expectNHCB: false},
+		},
+	}
+
+	// parserFactory creates a Parser from the keep-classic-histograms option.
+	type parserFactory func(bool) Parser
+
+	type testCase struct {
+		name    string
+		parser  parserFactory
+		classic bool
+		nhcb    bool
+		exp     []parsedEntry
+	}
+
+	type parserOptions struct {
+		useUTF8sep          bool
+		hasCreatedTimeStamp bool
+	}
+	// Each entry below defines the parser name, the Parser factory, the test
+	// cases the parser supports, and the parser options.
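+	// Editorial note, not part of the original change: the []int returned by
+	// each factory lists which of the three cases above that wire format can
+	// express. The text formats cannot carry exponential (native) histograms,
+	// so they only exercise case 1:
+	//
+	//	ProtoBuf    -> []int{1, 2, 3}
+	//	OpenMetrics -> []int{1}
+	//	Prometheus  -> []int{1}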
+ parsers := []func() (string, parserFactory, []int, parserOptions){ + func() (string, parserFactory, []int, parserOptions) { + factory := func(keepClassic bool) Parser { + inputBuf := createTestProtoBufHistogram(t) + return NewProtobufParser(inputBuf.Bytes(), keepClassic, false, labels.NewSymbolTable()) + } + return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true} + }, + func() (string, parserFactory, []int, parserOptions) { + factory := func(_ bool) Parser { + input := createTestOpenMetricsHistogram() + return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + } + return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true} + }, + func() (string, parserFactory, []int, parserOptions) { + factory := func(_ bool) Parser { + input := createTestPromHistogram() + return NewPromParser([]byte(input), labels.NewSymbolTable(), false) + } + return "Prometheus", factory, []int{1}, parserOptions{} + }, + } + + testCases := []testCase{} + for _, parser := range parsers { + for _, classic := range []bool{false, true} { + for _, nhcb := range []bool{false, true} { + parserName, parser, supportedCases, options := parser() + requirementName := "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb) + tc := testCase{ + name: "parser=" + parserName + ", " + requirementName, + parser: parser, + classic: classic, + nhcb: nhcb, + exp: []parsedEntry{}, + } + for _, caseNumber := range supportedCases { + caseI := cases[caseNumber-1] + req, ok := caseI[requirementName] + require.True(t, ok, "Case %d does not have requirement %s", caseNumber, requirementName) + metric := "test_histogram" + strconv.Itoa(caseNumber) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + help: "Test histogram " + strconv.Itoa(caseNumber), + }) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + typ: model.MetricTypeHistogram, + }) + + var ct int64 + if options.hasCreatedTimeStamp { + ct = 1000 + } + + var bucketForMetric func(string) string + if options.useUTF8sep { + bucketForMetric = func(s string) string { + return "_bucket\xffle\xff" + s + } + } else { + bucketForMetric = func(s string) string { + return "_bucket{le=\"" + s + "\"}" + } + } + + if req.expectExponential { + // Always expect exponential histogram first. + exponentialSeries := []parsedEntry{ + { + m: metric, + shs: &histogram.Histogram{ + Schema: 3, + Count: 175, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + ZeroCount: 2, + PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, + NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, exponentialSeries...) + } + if req.expectClassic { + // Always expect classic histogram series after exponential. 
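+					// Editorial note, not part of the original change: when both
+					// representations are kept, the expectations follow the parser's
+					// emission order, with the native (exponential) histogram sample
+					// before the synthesized classic _count/_sum/_bucket series:
+					//
+					//	test_histogramN           (exponential sample)
+					//	test_histogramN_count, _sum, _bucket{le="..."}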
+ classicSeries := []parsedEntry{ + { + m: metric + "_count", + v: 175, + lset: labels.FromStrings("__name__", metric+"_count"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + "_sum", + v: 0.0008280461746287094, + lset: labels.FromStrings("__name__", metric+"_sum"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0004899999999999998"), + v: 2, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0003899999999999998"), + v: 4, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0002899999999999998"), + v: 16, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("+Inf"), + v: 175, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, classicSeries...) + } + if req.expectNHCB { + // Always expect NHCB series after classic. + nhcbSeries := []parsedEntry{ + { + m: metric + "{}", + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 175, + Sum: 0.0008280461746287094, + PositiveSpans: []histogram.Span{{Length: 4}}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, nhcbSeries...) + } + } + testCases = append(testCases, tc) + } + } + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := tc.parser(tc.classic) + if tc.nhcb { + p = NewNHCBParser(p, labels.NewSymbolTable(), tc.classic) + } + got := testParse(t, p) + requireEntries(t, tc.exp, got) + }) + } +} + +func createTestProtoBufHistogram(t *testing.T) *bytes.Buffer { + testMetricFamilies := []string{`name: "test_histogram1" +help: "Test histogram 1" +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + > + timestamp_ms: 1234568 +>`, `name: "test_histogram2" +help: "Test histogram 2" +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +>`, `name: "test_histogram3" +help: "Test histogram 3" +type: HISTOGRAM +metric: < + histogram: < + 
created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> +`} + + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := &bytes.Buffer{} + + for _, tmf := range testMetricFamilies { + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(tmf, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. + varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + + return buf +} + +func createTestOpenMetricsHistogram() string { + return `# HELP test_histogram1 Test histogram 1 +# TYPE test_histogram1 histogram +test_histogram1_count 175 1234.568 +test_histogram1_sum 0.0008280461746287094 1234.568 +test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234.568 +test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234.568 +test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234.568 +test_histogram1_bucket{le="+Inf"} 175 1234.568 +test_histogram1_created 1 +# EOF` +} + +func createTestPromHistogram() string { + return `# HELP test_histogram1 Test histogram 1 +# TYPE test_histogram1 histogram +test_histogram1_count 175 1234568 +test_histogram1_sum 0.0008280461746287094 1234568 +test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234568 +test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568 +test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568 +test_histogram1_bucket{le="+Inf"} 175 1234568` +} + +func TestNHCBParserErrorHandling(t *testing.T) { + input := `# HELP something Histogram with non cumulative buckets +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 18 +something_bucket{le="+Inf"} 1 +something_count{a="b"} 9 +something_sum{a="b"} 42123 +something_created{a="b"} 1520430002 +something_bucket{a="b",le="0.0"} 1 +something_bucket{a="b",le="+Inf"} 9 +# EOF` + exp := []parsedEntry{ + { + m: "something", + help: "Histogram with non cumulative buckets", + }, + { + m: "something", + typ: model.MetricTypeHistogram, + }, + // The parser should skip the series with non-cumulative buckets. + { + m: `something{a="b"}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 42123.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{1, 7}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. 
+			},
+			lset: labels.FromStrings("__name__", "something", "a", "b"),
+			ct:   1520430002000,
+		},
+	}
+
+	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+	p = NewNHCBParser(p, labels.NewSymbolTable(), false)
+	got := testParse(t, p)
+	requireEntries(t, exp, got)
+}
+
+func TestNHCBParserResetLastExponential(t *testing.T) {
+	testMetricFamilies := []string{`name: "test_histogram1"
+help: "Test histogram 1"
+type: HISTOGRAM
+metric: <
+  histogram: <
+    created_timestamp: <
+      seconds: 1
+      nanos: 1
+    >
+    sample_count: 175
+    sample_sum: 0.0008280461746287094
+    schema: 3
+    zero_threshold: 2.938735877055719e-39
+    zero_count: 2
+    negative_span: <
+      offset: -162
+      length: 1
+    >
+    negative_span: <
+      offset: 23
+      length: 4
+    >
+    negative_delta: 1
+    negative_delta: 3
+    negative_delta: -2
+    negative_delta: -1
+    negative_delta: 1
+    positive_span: <
+      offset: -161
+      length: 1
+    >
+    positive_span: <
+      offset: 8
+      length: 3
+    >
+    positive_delta: 1
+    positive_delta: 2
+    positive_delta: -1
+    positive_delta: -1
+  >
+  timestamp_ms: 1234568
+>
+`, // Regression test to see that state resets after an exponential native histogram.
+		`name: "test_histogram2"
+help: "Test histogram 2"
+type: HISTOGRAM
+metric: <
+  histogram: <
+    created_timestamp: <
+      seconds: 1
+      nanos: 1
+    >
+    sample_count: 175
+    sample_sum: 0.0008280461746287094
+    bucket: <
+      cumulative_count: 2
+      upper_bound: -0.0004899999999999998
+    >
+    bucket: <
+      cumulative_count: 4
+      upper_bound: -0.0003899999999999998
+    >
+    bucket: <
+      cumulative_count: 16
+      upper_bound: -0.0002899999999999998
+    >
+  >
+  timestamp_ms: 1234568
+>`}
+
+	varintBuf := make([]byte, binary.MaxVarintLen32)
+	buf := &bytes.Buffer{}
+
+	for _, tmf := range testMetricFamilies {
+		pb := &dto.MetricFamily{}
+		// From text to proto message.
+		require.NoError(t, proto.UnmarshalText(tmf, pb))
+		// From proto message to binary protobuf.
+		protoBuf, err := proto.Marshal(pb)
+		require.NoError(t, err)
+
+		// Write first length, then binary protobuf.
+		varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
+		buf.Write(varintBuf[:varintLength])
+		buf.Write(protoBuf)
+	}
+
+	exp := []parsedEntry{
+		{
+			m:    "test_histogram1",
+			help: "Test histogram 1",
+		},
+		{
+			m:   "test_histogram1",
+			typ: model.MetricTypeHistogram,
+		},
+		// The exponential histogram must pass through unchanged, and the parser
+		// state must reset for the classic family that follows.
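+		// Editorial note, not part of the original change: the regression guarded
+		// against here is stale classic-bucket state surviving an exponential
+		// histogram, which would corrupt the NHCB built for the next family:
+		//
+		//	test_histogram1 (exponential) -> forwarded as-is, state reset
+		//	test_histogram2 (classic)     -> folded into a custom-bucket NHCB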
+ { + m: `test_histogram1`, + shs: &histogram.Histogram{ + Schema: 3, + Count: 175, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + ZeroCount: 2, + PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, + NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings("__name__", "test_histogram1"), + t: int64p(1234568), + ct: 1000, + }, + { + m: "test_histogram2", + help: "Test histogram 2", + }, + { + m: "test_histogram2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2{}", + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 175, + Sum: 0.0008280461746287094, + PositiveSpans: []histogram.Span{{Length: 4}}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, + }, + lset: labels.FromStrings("__name__", "test_histogram2"), + t: int64p(1234568), + ct: 1000, + }, + } + + p := NewProtobufParser(buf.Bytes(), false, false, labels.NewSymbolTable()) + p = NewNHCBParser(p, labels.NewSymbolTable(), false) + got := testParse(t, p) + requireEntries(t, exp, got) +} diff --git a/model/textparse/openmetricslex.l b/model/textparse/openmetricslex.l index 9afbbbd8bd..09106c52ce 100644 --- a/model/textparse/openmetricslex.l +++ b/model/textparse/openmetricslex.l @@ -69,6 +69,7 @@ S [ ] {S}#{S}\{ l.state = sExemplar; return tComment {L}({L}|{D})* return tLName +\"(\\.|[^\\"\n])*\" l.state = sExemplar; return tQString \} l.state = sEValue; return tBraceClose = l.state = sEValue; return tEqual \"(\\.|[^\\"\n])*\" l.state = sExemplar; return tLValue diff --git a/model/textparse/openmetricslex.l.go b/model/textparse/openmetricslex.l.go index c8789ef60d..c0b2fcdb4d 100644 --- a/model/textparse/openmetricslex.l.go +++ b/model/textparse/openmetricslex.l.go @@ -53,9 +53,9 @@ yystate0: case 8: // start condition: sExemplar goto yystart57 case 9: // start condition: sEValue - goto yystart62 + goto yystart65 case 10: // start condition: sETimestamp - goto yystart68 + goto yystart71 } yystate1: @@ -538,125 +538,153 @@ yystart57: switch { default: goto yyabort - case c == ',': + case c == '"': goto yystate58 - case c == '=': - goto yystate59 - case c == '}': + case c == ',': goto yystate61 + case c == '=': + goto yystate62 + case c == '}': + goto yystate64 case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate60 + goto yystate63 } yystate58: c = l.next() - goto yyrule26 + switch { + default: + goto yyabort + case c == '"': + goto yystate59 + case c == '\\': + goto yystate60 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate58 + } yystate59: c = l.next() - goto yyrule24 + goto yyrule23 yystate60: c = l.next() switch { default: - goto yyrule22 - case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate60 + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate58 } yystate61: c = l.next() - goto yyrule23 + goto yyrule27 yystate62: c = l.next() -yystart62: - switch { - default: - goto yyabort - case c == ' ': - goto yystate63 - case c == '"': - goto yystate65 - } + goto yyrule25 yystate63: c = l.next() switch { default: - goto yyabort - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': - goto yystate64 + goto yyrule22 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate63 } yystate64: c = l.next() - switch { - default: - goto yyrule27 - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate64 - } + goto yyrule24 yystate65: c = l.next() +yystart65: switch { default: goto yyabort - case c == '"': + case c == ' ': goto yystate66 - case c == '\\': - goto yystate67 - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': - goto yystate65 + case c == '"': + goto yystate68 } yystate66: c = l.next() - goto yyrule25 + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate67 + } yystate67: c = l.next() switch { default: - goto yyabort - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate65 + goto yyrule28 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate67 } yystate68: c = l.next() -yystart68: switch { default: goto yyabort - case c == ' ': - goto yystate70 - case c == '\n': + case c == '"': goto yystate69 + case c == '\\': + goto yystate70 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate68 } yystate69: c = l.next() - goto yyrule29 + goto yyrule26 yystate70: c = l.next() switch { default: goto yyabort - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate71 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate68 } yystate71: c = l.next() +yystart71: switch { default: - goto yyrule28 + goto yyabort + case c == ' ': + goto yystate73 + case c == '\n': + goto yystate72 + } + +yystate72: + c = l.next() + goto yyrule30 + +yystate73: + c = l.next() + switch { + default: + goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate71 + goto yystate74 + } + +yystate74: + c = l.next() + switch { + default: + goto yyrule29 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': + goto yystate74 } yyrule1: // #{S} @@ -782,39 +810,45 @@ yyrule22: // {L}({L}|{D})* { return tLName } -yyrule23: // \} +yyrule23: // \"(\\.|[^\\"\n])*\" + { + l.state = sExemplar + return tQString + goto yystate0 + } +yyrule24: // \} { l.state = sEValue return tBraceClose goto yystate0 } -yyrule24: // = +yyrule25: // = { l.state = sEValue return tEqual goto yystate0 } -yyrule25: // \"(\\.|[^\\"\n])*\" +yyrule26: // \"(\\.|[^\\"\n])*\" { l.state = sExemplar return tLValue goto yystate0 } -yyrule26: // , +yyrule27: // , { return tComma } -yyrule27: // {S}[^ \n]+ +yyrule28: // {S}[^ \n]+ { l.state = sETimestamp return tValue goto yystate0 } -yyrule28: // {S}[^ \n]+ +yyrule29: // {S}[^ \n]+ { return tTimestamp } -yyrule29: // \n +yyrule30: // \n if true { // avoid go vet determining the below panic will not be reached l.state = sInit return tLinebreak @@ -859,10 +893,10 @@ yyabort: // no lexem recognized goto yystate57 } if false { - goto yystate62 + goto yystate65 } if false { - goto yystate68 + goto yystate71 } } diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index b7ad1dd85c..d9c37a78b7 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -17,19 +17,23 @@ package textparse import ( + "bytes" "errors" "fmt" "io" "math" + "strconv" "strings" "unicode/utf8" + "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/schema" ) type openMetricsLexer struct { @@ -70,17 +74,20 @@ func (l *openMetricsLexer) Error(es string) { // OpenMetricsParser parses samples from a byte slice of samples in the official // OpenMetrics text exposition format. -// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit +// Specification can be found at https://prometheus.io/docs/specs/om/open_metrics_spec/ type OpenMetricsParser struct { - l *openMetricsLexer - builder labels.ScratchBuilder - series []byte - text []byte - mtype model.MetricType - val float64 - ts int64 - hasTS bool - start int + l *openMetricsLexer + builder labels.ScratchBuilder + series []byte + mfNameLen int // length of metric family name to get from series. + text []byte + mtype model.MetricType + unit string + + val float64 + ts int64 + hasTS bool + start int // offsets is a list of offsets into series that describe the positions // of the metric name and label names and values for this series. // p.offsets[0] is the start character of the metric name. @@ -94,16 +101,65 @@ type OpenMetricsParser struct { exemplarVal float64 exemplarTs int64 hasExemplarTs bool + + // Created timestamp parsing state. + ct int64 + ctHashSet uint64 + // ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead). + ignoreExemplar bool + // visitedMFName is the metric family name of the last visited metric when peeking ahead + // for _created series during the execution of the CreatedTimestamp method. + visitedMFName []byte + skipCTSeries bool + enableTypeAndUnitLabels bool } -// NewOpenMetricsParser returns a new parser of the byte slice. 
-func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser { - return &OpenMetricsParser{ - l: &openMetricsLexer{b: b}, - builder: labels.NewScratchBuilderWithSymbolTable(st, 16), +type openMetricsParserOptions struct { + skipCTSeries bool + enableTypeAndUnitLabels bool +} + +type OpenMetricsOption func(*openMetricsParserOptions) + +// WithOMParserCTSeriesSkipped turns off exposing _created lines +// as series, which makes those only used for parsing created timestamp +// for `CreatedTimestamp` method purposes. +// +// It's recommended to use this option to avoid using _created lines for other +// purposes than created timestamp, but leave false by default for the +// best-effort compatibility. +func WithOMParserCTSeriesSkipped() OpenMetricsOption { + return func(o *openMetricsParserOptions) { + o.skipCTSeries = true } } +// WithOMParserTypeAndUnitLabels enables type-and-unit-labels mode +// in which parser injects __type__ and __unit__ into labels. +func WithOMParserTypeAndUnitLabels() OpenMetricsOption { + return func(o *openMetricsParserOptions) { + o.enableTypeAndUnitLabels = true + } +} + +// NewOpenMetricsParser returns a new parser for the byte slice with option to skip CT series parsing. +func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser { + options := &openMetricsParserOptions{} + + for _, opt := range opts { + opt(options) + } + + parser := &OpenMetricsParser{ + l: &openMetricsLexer{b: b}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + skipCTSeries: options.skipCTSeries, + enableTypeAndUnitLabels: options.enableTypeAndUnitLabels, + } + + return parser +} + // Series returns the bytes of the series, the timestamp if set, and the value // of the current sample. func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { @@ -145,7 +201,7 @@ func (p *OpenMetricsParser) Type() ([]byte, model.MetricType) { // Must only be called after Next returned a unit entry. // The returned byte slices become invalid after the next call to Next. func (p *OpenMetricsParser) Unit() ([]byte, []byte) { - return p.l.b[p.offsets[0]:p.offsets[1]], p.text + return p.l.b[p.offsets[0]:p.offsets[1]], []byte(p.unit) } // Comment returns the text of the current comment. @@ -155,31 +211,41 @@ func (p *OpenMetricsParser) Comment() []byte { return p.text } -// Metric writes the labels of the current sample into the passed labels. -// It returns the string from which the metric was parsed. -func (p *OpenMetricsParser) Metric(l *labels.Labels) string { - // Copy the buffer to a string: this is only necessary for the return value. +// Labels writes the labels of the current sample into the passed labels. +func (p *OpenMetricsParser) Labels(l *labels.Labels) { + // Defensive copy in case the following keeps a reference. + // See https://github.com/prometheus/prometheus/issues/16490 s := string(p.series) p.builder.Reset() metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) - p.builder.Add(labels.MetricName, metricName) + m := schema.Metadata{ + Name: metricName, + Type: p.mtype, + Unit: p.unit, + } + if p.enableTypeAndUnitLabels { + m.AddToLabels(&p.builder) + } else { + p.builder.Add(labels.MetricName, metricName) + } for i := 2; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start b := p.offsets[i+1] - p.start label := unreplace(s[a:b]) + if p.enableTypeAndUnitLabels && !m.IsEmptyFor(label) { + // Dropping user provided metadata labels, if found in the OM metadata. 
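+				// Editorial note, not part of the original change: in
+				// type-and-unit-labels mode the OM metadata is authoritative; the
+				// metadata-derived labels were already added via m.AddToLabels
+				// above, so only a conflicting user-supplied copy is dropped, e.g.:
+				//
+				//	an exposed __type__="gauge" label on a counter family gives
+				//	way to __type__="counter" from the # TYPE line.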
+ continue + } c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) - + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) p.builder.Add(label, value) } p.builder.Sort() *l = p.builder.Labels() - - return s } // Exemplar writes the exemplar of the current sample into the passed exemplar. @@ -219,10 +285,148 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } -// CreatedTimestamp returns nil as it's not implemented yet. -// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 -func (p *OpenMetricsParser) CreatedTimestamp() *int64 { - return nil +// CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. +// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. +func (p *OpenMetricsParser) CreatedTimestamp() int64 { + if !typeRequiresCT(p.mtype) { + // Not a CT supported metric type, fast path. + p.ctHashSet = 0 // Use ctHashSet as a single way of telling "empty cache" + return 0 + } + + var ( + buf []byte + currName []byte + ) + if len(p.series) > 1 && p.series[0] == '{' && p.series[1] == '"' { + // special case for UTF-8 encoded metric family names. + currName = p.series[p.offsets[0]-p.start : p.mfNameLen+2] + } else { + currName = p.series[p.offsets[0]-p.start : p.mfNameLen] + } + + currHash := p.seriesHash(&buf, currName) + // Check cache, perhaps we fetched something already. + if currHash == p.ctHashSet && p.ct > 0 { + return p.ct + } + + // Create a new lexer and other core state details to reset the parser once this function is done executing. + resetLexer := &openMetricsLexer{ + b: p.l.b, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + resetStart := p.start + resetMType := p.mtype + + p.skipCTSeries = false + p.ignoreExemplar = true + defer func() { + p.l = resetLexer + p.start = resetStart + p.mtype = resetMType + p.ignoreExemplar = false + }() + + for { + eType, err := p.Next() + if err != nil { + // This means p.Next() will give error too later on, so def no CT line found. + // This might result in partial scrape with wrong/missing CT, but only + // spec improvement would help. + // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + p.resetCTParseValues() + return 0 + } + if eType != EntrySeries { + // Assume we hit different family, no CT line found. + p.resetCTParseValues() + return 0 + } + + peekedName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + if len(peekedName) < 8 || string(peekedName[len(peekedName)-8:]) != "_created" { + // Not a CT line, search more. + continue + } + + // Remove _created suffix. + peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8]) + if peekedHash != currHash { + // Found CT line for a different series, for our series no CT. + p.resetCTParseValues() + return 0 + } + + // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. + // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#timestamps + ct := int64(p.val * 1000.0) + p.setCTParseValues(ct, currHash, currName, true) + return ct + } +} + +var ( + leBytes = []byte{108, 101} + quantileBytes = []byte{113, 117, 97, 110, 116, 105, 108, 101} +) + +// seriesHash generates a hash based on the metric family name and the offsets +// of label names and values from the parsed OpenMetrics data. 
+// It skips quantile and le labels for summaries and histograms, respectively.
+func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []byte) uint64 {
+	// Iterate through p.offsets to find the label names and values.
+	for i := 2; i < len(p.offsets); i += 4 {
+		lStart := p.offsets[i] - p.start
+		lEnd := p.offsets[i+1] - p.start
+		label := p.series[lStart:lEnd]
+		// Skip quantile and le labels for summaries and histograms.
+		if p.mtype == model.MetricTypeSummary && bytes.Equal(label, quantileBytes) {
+			continue
+		}
+		if p.mtype == model.MetricTypeHistogram && bytes.Equal(label, leBytes) {
+			continue
+		}
+		*offsetsArr = append(*offsetsArr, p.series[lStart:lEnd]...)
+		vStart := p.offsets[i+2] - p.start
+		vEnd := p.offsets[i+3] - p.start
+		*offsetsArr = append(*offsetsArr, p.series[vStart:vEnd]...)
+	}
+
+	*offsetsArr = append(*offsetsArr, metricFamilyName...)
+	hashedOffsets := xxhash.Sum64(*offsetsArr)
+
+	// Reset the offsets array for later reuse.
+	*offsetsArr = (*offsetsArr)[:0]
+	return hashedOffsets
+}
+
+// setCTParseValues sets the parser state after the CreatedTimestamp method has run and a CT was found.
+// Caching this state prevents re-parsing the same series and lets the CT value be returned early.
+func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) {
+	p.ct = ct
+	p.ctHashSet = ctHashSet
+	p.visitedMFName = mfName
+	p.skipCTSeries = skipCTSeries // Always true at the only call site: once a CT was found, _created series can be skipped again.
+}
+
+// resetCTParseValues resets the parser to the state before the CreatedTimestamp method was called.
+func (p *OpenMetricsParser) resetCTParseValues() {
+	p.ctHashSet = 0
+	p.skipCTSeries = true
+}
+
+// typeRequiresCT returns true if the metric type can have a _created timestamp.
+func typeRequiresCT(t model.MetricType) bool {
+	switch t {
+	case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
+		return true
+	default:
+		return false
+	}
+}
 
 // nextToken returns the next token from the openMetricsLexer.
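+// Editorial sketch, not part of the original change, illustrating the CT flow
+// implemented above for a counter family followed by a _created line:
+//
+//	p := NewOpenMetricsParser(b, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+//	// ... p.Next() has returned an EntrySeries for foo_total ...
+//	ct := p.CreatedTimestamp() // Peeks ahead for foo_created, caches by series hash.
+//	ct = p.CreatedTimestamp()  // Second call hits the ctHashSet cache, no re-parse.
+//	// Gauges and other types without CT support take the fast path and return 0.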
@@ -246,10 +450,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.start = p.l.i p.offsets = p.offsets[:0] - p.eOffsets = p.eOffsets[:0] - p.exemplar = p.exemplar[:0] - p.exemplarVal = 0 - p.hasExemplarTs = false + if !p.ignoreExemplar { + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + } switch t := p.nextToken(); t { case tEOFWord: @@ -268,6 +474,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { mStart++ mEnd-- } + p.mfNameLen = mEnd - mStart p.offsets = append(p.offsets, mStart, mEnd) default: return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) @@ -315,11 +522,11 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tType: return EntryType, nil case tUnit: + p.unit = string(p.text) m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]]) - u := yoloString(p.text) - if len(u) > 0 { - if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { - return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m) + if len(p.unit) > 0 { + if !strings.HasSuffix(m, p.unit) || len(m) < len(p.unit)+1 || p.l.b[p.offsets[1]-len(p.unit)-1] != '_' { + return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", p.unit, m) } } return EntryUnit, nil @@ -337,7 +544,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) { } p.series = p.l.b[p.start:p.l.i] - return p.parseMetricSuffix(p.nextToken()) + if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil { + return EntryInvalid, err + } + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() + } + return EntrySeries, nil case tMName: p.offsets = append(p.offsets, p.start, p.l.i) p.series = p.l.b[p.start:p.l.i] @@ -351,8 +564,14 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.series = p.l.b[p.start:p.l.i] t2 = p.nextToken() } - return p.parseMetricSuffix(t2) + if err := p.parseSeriesEndOfLine(t2); err != nil { + return EntryInvalid, err + } + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() + } + return EntrySeries, nil default: err = p.parseError("expected a valid start token", t) } @@ -361,6 +580,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) { func (p *OpenMetricsParser) parseComment() error { var err error + + if p.ignoreExemplar { + for t := p.nextToken(); t != tLinebreak; t = p.nextToken() { + if t == tEOF { + return errors.New("data does not end with # EOF") + } + } + return nil + } + // Parse the labels. p.eOffsets, err = p.parseLVals(p.eOffsets, true) if err != nil { @@ -467,51 +696,63 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e } } -// parseMetricSuffix parses the end of the line after the metric name and -// labels. It starts parsing with the provided token. -func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) { +// isCreatedSeries returns true if the current series is a _created series. +func (p *OpenMetricsParser) isCreatedSeries() bool { + metricName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + // check length so the metric is longer than len("_created") + if typeRequiresCT(p.mtype) && len(metricName) >= 8 && string(metricName[len(metricName)-8:]) == "_created" { + return true + } + return false +} + +// parseSeriesEndOfLine parses the series end of the line (value, optional +// timestamp, commentary, etc.) after the metric name and labels. +// It starts parsing with the provided token. 
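+// Editorial note, not part of the original change: unlike the former
+// parseMetricSuffix, this helper returns only an error, so Next can decide
+// what to do with the parsed series and transparently recurse past _created
+// lines, as in:
+//
+//	if err := p.parseSeriesEndOfLine(t2); err != nil {
+//		return EntryInvalid, err
+//	}
+//	if p.skipCTSeries && p.isCreatedSeries() {
+//		return p.Next()
+//	}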
+func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error { if p.offsets[0] == -1 { - return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) } var err error p.val, err = p.getFloatValue(t, "metric") if err != nil { - return EntryInvalid, err + return err } p.hasTS = false switch t2 := p.nextToken(); t2 { case tEOF: - return EntryInvalid, errors.New("data does not end with # EOF") + return errors.New("data does not end with # EOF") case tLinebreak: break case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } case tTimestamp: p.hasTS = true var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) + return fmt.Errorf("invalid timestamp %f", ts) } p.ts = int64(ts * 1000) switch t3 := p.nextToken(); t3 { case tLinebreak: case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } default: - return EntryInvalid, p.parseError("expected next entry after timestamp", t3) + return p.parseError("expected next entry after timestamp", t3) } } - return EntrySeries, nil + + return nil } func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { @@ -528,3 +769,15 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } + +// normalizeFloatsInLabelValues ensures that values of the "le" labels of classic histograms and "quantile" labels +// of summaries follow OpenMetrics formatting rules. +func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string { + if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) { + f, err := strconv.ParseFloat(v, 64) + if err == nil { + return formatOpenMetricsFloat(f) + } + } + return v +} diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index bc76a540d3..35536e7861 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -14,6 +14,7 @@ package textparse import ( + "fmt" "io" "testing" @@ -24,6 +25,8 @@ import ( "github.com/prometheus/prometheus/model/labels" ) +func int64p(x int64) *int64 { return &x } + func TestOpenMetricsParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
# TYPE go_gc_duration_seconds summary @@ -63,204 +66,611 @@ ss{A="a"} 0 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 testmetric{label="\"bar\""} 1 +# HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter -foo_total 17.0 1520879607.789 # {id="counter-test"} 5` +foo_total 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created 1520872607.123 +foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created{a="b"} 1520872607.123 +foo_total{le="c"} 21.0 +foo_created{le="c"} 1520872621.123 +foo_total{le="1"} 10.0 +# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far +# TYPE bar summary +bar_count 17.0 +bar_sum 324789.3 +bar{quantile="0.95"} 123.7 +bar{quantile="0.99"} 150.0 +bar_created 1520872608.124 +# HELP baz Histogram with the same objective as above's summary +# TYPE baz histogram +baz_bucket{le="0.0"} 0 +baz_bucket{le="+Inf"} 17 +baz_count 17 +baz_sum 324789.3 +baz_created 1520872609.125 +# HELP fizz_created Gauge which shouldn't be parsed as CT +# TYPE fizz_created gauge +fizz_created 17.0 +# HELP something Histogram with _created between buckets and summary +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 1 +something_bucket{le="1"} 2 +something_bucket{le="+Inf"} 18 +# HELP yum Summary with _created between sum and quantiles +# TYPE yum summary +yum_count 20 +yum_sum 324789.5 +yum_created 1520430003 +yum{quantile="0.95"} 123.7 +yum{quantile="0.99"} 150.0 +# HELP foobar Summary with _created as the first line +# TYPE foobar summary +foobar_count 21 +foobar_created 1520430004 +foobar_sum 324789.6 +foobar{quantile="0.95"} 123.8 +foobar{quantile="0.99"} 150.1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\n# EOF\n" - int64p := func(x int64) *int64 { return &x } - - exp := []expectedParse{ - { - m: "go_gc_duration_seconds", - help: "A summary of the GC invocation durations.", - }, { - m: "go_gc_duration_seconds", - typ: model.MetricTypeSummary, - }, { - m: "go_gc_duration_seconds", - unit: "seconds", - }, { - m: `go_gc_duration_seconds{quantile="0"}`, - v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), - }, { - m: `go_gc_duration_seconds{quantile="0.25"}`, - v: 7.424100000000001e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"), - }, { - m: `go_gc_duration_seconds{quantile="0.5",a="b"}`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"), - }, { - m: "nohelp1", - help: "", - }, { - m: "help2", - help: "escape \\ \n \\ \" \\x chars", - }, { - m: "nounit", - unit: "", - }, { - m: `go_gc_duration_seconds{quantile="1.0",a="b"}`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), - }, { - m: `go_gc_duration_seconds_count`, - v: 99, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"), - }, { - m: `some:aggregate:rate5m{a_b="c"}`, - v: 1, - lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), - }, { - m: "go_goroutines", - help: "Number of goroutines that currently exist.", - }, { - m: "go_goroutines", - typ: model.MetricTypeGauge, - }, { - m: `go_goroutines`, - v: 33, - t: int64p(123123), - lset: labels.FromStrings("__name__", "go_goroutines"), - }, { - m: "hh", - typ: 
model.MetricTypeHistogram, - }, { - m: `hh_bucket{le="+Inf"}`, - v: 1, - lset: labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"), - }, { - m: "gh", - typ: model.MetricTypeGaugeHistogram, - }, { - m: `gh_bucket{le="+Inf"}`, - v: 1, - lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"), - }, { - m: "hhh", - typ: model.MetricTypeHistogram, - }, { - m: `hhh_bucket{le="+Inf"}`, - v: 1, - lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, - }, { - m: `hhh_count`, - v: 1, - lset: labels.FromStrings("__name__", "hhh_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, - }, { - m: "ggh", - typ: model.MetricTypeGaugeHistogram, - }, { - m: `ggh_bucket{le="+Inf"}`, - v: 1, - lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, - }, { - m: `ggh_count`, - v: 1, - lset: labels.FromStrings("__name__", "ggh_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, - }, { - m: "smr_seconds", - typ: model.MetricTypeSummary, - }, { - m: `smr_seconds_count`, - v: 2, - lset: labels.FromStrings("__name__", "smr_seconds_count"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}, - }, { - m: `smr_seconds_sum`, - v: 42, - lset: labels.FromStrings("__name__", "smr_seconds_sum"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, - }, { - m: "ii", - typ: model.MetricTypeInfo, - }, { - m: `ii{foo="bar"}`, - v: 1, - lset: labels.FromStrings("__name__", "ii", "foo", "bar"), - }, { - m: "ss", - typ: model.MetricTypeStateset, - }, { - m: `ss{ss="foo"}`, - v: 1, - lset: labels.FromStrings("__name__", "ss", "ss", "foo"), - }, { - m: `ss{ss="bar"}`, - v: 0, - lset: labels.FromStrings("__name__", "ss", "ss", "bar"), - }, { - m: `ss{A="a"}`, - v: 0, - lset: labels.FromStrings("A", "a", "__name__", "ss"), - }, { - m: "un", - typ: model.MetricTypeUnknown, - }, { - m: "_metric_starting_with_underscore", - v: 1, - lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"), - }, { - m: "testmetric{_label_starting_with_underscore=\"foo\"}", - v: 1, - lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), - }, { - m: "testmetric{label=\"\\\"bar\\\"\"}", - v: 1, - lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), - }, { - m: "foo", - typ: model.MetricTypeCounter, - }, { - m: "foo_total", - v: 17, - lset: labels.FromStrings("__name__", "foo_total"), - t: int64p(1520879607789), - e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, - }, { - m: "metric", - help: "foo\x00bar", - }, { - m: "null_byte_metric{a=\"abc\x00\"}", - v: 1, - lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), - }, + for _, typeAndUnitEnabled := range []bool{false, true} { + t.Run(fmt.Sprintf("type-and-unit=%v", typeAndUnitEnabled), func(t *testing.T) { + exp := []parsedEntry{ + { + m: "go_gc_duration_seconds", + help: "A summary of the GC invocation durations.", + }, { + m: "go_gc_duration_seconds", + typ: model.MetricTypeSummary, + }, { + m: "go_gc_duration_seconds", + unit: "seconds", + }, { + m: 
`go_gc_duration_seconds{quantile="0"}`, + v: 4.9351e-05, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.0"), + labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), + ), + }, { + m: `go_gc_duration_seconds{quantile="0.25"}`, + v: 7.424100000000001e-05, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.25"), + labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"), + ), + }, { + m: `go_gc_duration_seconds{quantile="0.5",a="b"}`, + v: 8.3835e-05, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "__unit__", "seconds", "quantile", "0.5", "a", "b"), + labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"), + ), + }, { + m: "nohelp1", + help: "", + }, { + m: "help2", + help: "escape \\ \n \\ \" \\x chars", + }, { + m: "nounit", + unit: "", + }, { + m: `go_gc_duration_seconds{quantile="1.0",a="b"}`, + v: 8.3835e-05, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "1.0", "a", "b"), + labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), + ), + }, { + m: `go_gc_duration_seconds_count`, + v: 99, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds_count", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "go_gc_duration_seconds_count"), + ), + }, { + m: `some:aggregate:rate5m{a_b="c"}`, + v: 1, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), model.MetricTypeSummary), + }, { + m: "go_goroutines", + help: "Number of goroutines that currently exist.", + }, { + m: "go_goroutines", + typ: model.MetricTypeGauge, + }, { + m: `go_goroutines`, + v: 33, + t: int64p(123123), + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_goroutines", "__type__", string(model.MetricTypeGauge)), + labels.FromStrings("__name__", "go_goroutines"), + ), + }, { + m: "hh", + typ: model.MetricTypeHistogram, + }, { + m: `hh_bucket{le="+Inf"}`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "hh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"), + labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"), + ), + }, { + m: "gh", + typ: model.MetricTypeGaugeHistogram, + }, { + m: `gh_bucket{le="+Inf"}`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "gh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"), + labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"), + ), + }, { + m: "hhh", + typ: model.MetricTypeHistogram, + }, { + m: `hhh_bucket{le="+Inf"}`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "hhh_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"), + labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + }, + }, { + m: `hhh_count`, + v: 
1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "hhh_count", "__type__", string(model.MetricTypeHistogram)), + labels.FromStrings("__name__", "hhh_count"), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, + }, + }, { + m: "ggh", + typ: model.MetricTypeGaugeHistogram, + }, { + m: `ggh_bucket{le="+Inf"}`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "ggh_bucket", "__type__", string(model.MetricTypeGaugeHistogram), "le", "+Inf"), + labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, + }, { + m: `ggh_count`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "ggh_count", "__type__", string(model.MetricTypeGaugeHistogram)), + labels.FromStrings("__name__", "ggh_count"), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, + }, { + m: "smr_seconds", + typ: model.MetricTypeSummary, + }, { + m: `smr_seconds_count`, + v: 2, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "smr_seconds_count", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "smr_seconds_count"), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}, + }, + }, { + m: `smr_seconds_sum`, + v: 42, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "smr_seconds_sum", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "smr_seconds_sum"), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, + }, + }, { + m: "ii", + typ: model.MetricTypeInfo, + }, { + m: `ii{foo="bar"}`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "ii", "__type__", string(model.MetricTypeInfo), "foo", "bar"), + labels.FromStrings("__name__", "ii", "foo", "bar"), + ), + }, { + m: "ss", + typ: model.MetricTypeStateset, + }, { + m: `ss{ss="foo"}`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "foo"), + labels.FromStrings("__name__", "ss", "ss", "foo"), + ), + }, { + m: `ss{ss="bar"}`, + v: 0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "ss", "bar"), + labels.FromStrings("__name__", "ss", "ss", "bar"), + ), + }, { + m: `ss{A="a"}`, + v: 0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "ss", "__type__", string(model.MetricTypeStateset), "A", "a"), + labels.FromStrings("__name__", "ss", "A", "a"), + ), + }, { + m: "un", + typ: model.MetricTypeUnknown, + }, { + m: "_metric_starting_with_underscore", + v: 1, + lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"), + }, { + m: "testmetric{_label_starting_with_underscore=\"foo\"}", + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), + }, { + m: "testmetric{label=\"\\\"bar\\\"\"}", + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: "foo", + help: "Counter with and 
without labels to certify CT is parsed for both cases", + }, { + m: "foo", + typ: model.MetricTypeCounter, + }, { + m: "foo_total", + v: 17, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter)), + labels.FromStrings("__name__", "foo_total"), + ), + t: int64p(1520879607789), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + }, + ct: 1520872607123, + }, { + m: `foo_total{a="b"}`, + v: 17.0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "a", "b"), + labels.FromStrings("__name__", "foo_total", "a", "b"), + ), + t: int64p(1520879607789), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + }, + ct: 1520872607123, + }, { + m: `foo_total{le="c"}`, + v: 21.0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "c"), + labels.FromStrings("__name__", "foo_total", "le", "c"), + ), + ct: 1520872621123, + }, { + m: `foo_total{le="1"}`, + v: 10.0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foo_total", "__type__", string(model.MetricTypeCounter), "le", "1"), + labels.FromStrings("__name__", "foo_total", "le", "1"), + ), + }, { + m: "bar", + help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", + }, { + m: "bar", + typ: model.MetricTypeSummary, + }, { + m: "bar_count", + v: 17.0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "bar_count", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "bar_count"), + ), + ct: 1520872608124, + }, { + m: "bar_sum", + v: 324789.3, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "bar_sum", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "bar_sum"), + ), + ct: 1520872608124, + }, { + m: `bar{quantile="0.95"}`, + v: 123.7, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"), + labels.FromStrings("__name__", "bar", "quantile", "0.95"), + ), + ct: 1520872608124, + }, { + m: `bar{quantile="0.99"}`, + v: 150.0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "bar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"), + labels.FromStrings("__name__", "bar", "quantile", "0.99"), + ), + ct: 1520872608124, + }, { + m: "baz", + help: "Histogram with the same objective as above's summary", + }, { + m: "baz", + typ: model.MetricTypeHistogram, + }, { + m: `baz_bucket{le="0.0"}`, + v: 0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"), + labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), + ), + ct: 1520872609125, + }, { + m: `baz_bucket{le="+Inf"}`, + v: 17, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "baz_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"), + labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), + ), + ct: 1520872609125, + }, { + m: `baz_count`, + v: 17, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "baz_count", "__type__", 
string(model.MetricTypeHistogram)), + labels.FromStrings("__name__", "baz_count"), + ), + ct: 1520872609125, + }, { + m: `baz_sum`, + v: 324789.3, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "baz_sum", "__type__", string(model.MetricTypeHistogram)), + labels.FromStrings("__name__", "baz_sum"), + ), + ct: 1520872609125, + }, { + m: "fizz_created", + help: "Gauge which shouldn't be parsed as CT", + }, { + m: "fizz_created", + typ: model.MetricTypeGauge, + }, { + m: `fizz_created`, + v: 17, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "fizz_created", "__type__", string(model.MetricTypeGauge)), + labels.FromStrings("__name__", "fizz_created"), + ), + }, { + m: "something", + help: "Histogram with _created between buckets and summary", + }, { + m: "something", + typ: model.MetricTypeHistogram, + }, { + m: `something_count`, + v: 18, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "something_count", "__type__", string(model.MetricTypeHistogram)), + labels.FromStrings("__name__", "something_count"), + ), + ct: 1520430001000, + }, { + m: `something_sum`, + v: 324789.4, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "something_sum", "__type__", string(model.MetricTypeHistogram)), + labels.FromStrings("__name__", "something_sum"), + ), + ct: 1520430001000, + }, { + m: `something_bucket{le="0.0"}`, + v: 1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "0.0"), + labels.FromStrings("__name__", "something_bucket", "le", "0.0"), + ), + ct: 1520430001000, + }, { + m: `something_bucket{le="1"}`, + v: 2, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "1.0"), + labels.FromStrings("__name__", "something_bucket", "le", "1.0"), + ), + ct: 1520430001000, + }, { + m: `something_bucket{le="+Inf"}`, + v: 18, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "something_bucket", "__type__", string(model.MetricTypeHistogram), "le", "+Inf"), + labels.FromStrings("__name__", "something_bucket", "le", "+Inf"), + ), + ct: 1520430001000, + }, { + m: "yum", + help: "Summary with _created between sum and quantiles", + }, { + m: "yum", + typ: model.MetricTypeSummary, + }, { + m: `yum_count`, + v: 20, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "yum_count", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "yum_count"), + ), + ct: 1520430003000, + }, { + m: `yum_sum`, + v: 324789.5, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "yum_sum", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "yum_sum"), + ), + ct: 1520430003000, + }, { + m: `yum{quantile="0.95"}`, + v: 123.7, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"), + labels.FromStrings("__name__", "yum", "quantile", "0.95"), + ), + ct: 1520430003000, + }, { + m: `yum{quantile="0.99"}`, + v: 150.0, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "yum", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"), + labels.FromStrings("__name__", "yum", "quantile", "0.99"), + ), + ct: 1520430003000, + }, { + m: 
"foobar", + help: "Summary with _created as the first line", + }, { + m: "foobar", + typ: model.MetricTypeSummary, + }, { + m: `foobar_count`, + v: 21, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foobar_count", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "foobar_count"), + ), + ct: 1520430004000, + }, { + m: `foobar_sum`, + v: 324789.6, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foobar_sum", "__type__", string(model.MetricTypeSummary)), + labels.FromStrings("__name__", "foobar_sum"), + ), + ct: 1520430004000, + }, { + m: `foobar{quantile="0.95"}`, + v: 123.8, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.95"), + labels.FromStrings("__name__", "foobar", "quantile", "0.95"), + ), + ct: 1520430004000, + }, { + m: `foobar{quantile="0.99"}`, + v: 150.1, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "foobar", "__type__", string(model.MetricTypeSummary), "quantile", "0.99"), + labels.FromStrings("__name__", "foobar", "quantile", "0.99"), + ), + ct: 1520430004000, + }, { + m: "metric", + help: "foo\x00bar", + }, { + m: "null_byte_metric{a=\"abc\x00\"}", + v: 1, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), model.MetricTypeSummary), + }, + } + opts := []OpenMetricsOption{WithOMParserCTSeriesSkipped()} + if typeAndUnitEnabled { + opts = append(opts, WithOMParserTypeAndUnitLabels()) + } + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), opts...) + got := testParse(t, p) + requireEntries(t, exp, got) + }) } - - p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) } -func TestUTF8OpenMetricsParse(t *testing.T) { - oldValidationScheme := model.NameValidationScheme - model.NameValidationScheme = model.UTF8Validation - defer func() { - model.NameValidationScheme = oldValidationScheme - }() - +func TestOpenMetricsParse_UTF8(t *testing.T) { input := `# HELP "go.gc_duration_seconds" A summary of the GC invocation durations. 
# TYPE "go.gc_duration_seconds" summary # UNIT "go.gc_duration_seconds" seconds {"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 +{"go.gc_duration_seconds_created"} 1520872607.123 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 {q="0.9","http.status",a="b"} 8.3835e-05 {"go.gc_duration_seconds_sum"} 0.004304266 -{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0` +{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0 +quotedexemplar_count 1 # {"id.thing"="histogram-count-test"} 4 +quotedexemplar2_count 1 # {"id.thing"="histogram-count-test",other="hello"} 4 +` - input += "\n# EOF\n" + input += "# EOF\n" - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go.gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -273,11 +683,13 @@ func TestUTF8OpenMetricsParse(t *testing.T) { }, { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"), + ct: 1520872607123, }, { m: `{"go.gc_duration_seconds",quantile="0.25"}`, v: 7.424100000000001e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), + ct: 1520872607123, }, { m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, v: 8.3835e-05, @@ -303,11 +715,26 @@ func TestUTF8OpenMetricsParse(t *testing.T) { v: 10.0, lset: labels.FromStrings("__name__", `Heizölrückstoßabdämpfung 10€ metric with "interesting" {character choices}`, "strange©™\n'quoted' \"name\"", "6"), + }, { + m: `quotedexemplar_count`, + v: 1, + lset: labels.FromStrings("__name__", "quotedexemplar_count"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id.thing", "histogram-count-test"), Value: 4}, + }, + }, { + m: `quotedexemplar2_count`, + v: 1, + lset: labels.FromStrings("__name__", "quotedexemplar2_count"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id.thing", "histogram-count-test", "other", "hello"), Value: 4}, + }, }, } - p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestOpenMetricsParseErrors(t *testing.T) { @@ -598,14 +1025,10 @@ func TestOpenMetricsParseErrors(t *testing.T) { input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf", err: `invalid exemplar timestamp -Inf`, }, - { - input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf", - err: `invalid exemplar timestamp +Inf`, - }, } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() @@ -670,17 +1093,121 @@ func TestOMNullByteHandling(t *testing.T) { } for i, c := range cases { - p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) + p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) var err error for err == nil { _, err = p.Next() } if c.err == "" { - require.Equal(t, io.EOF, 
err, "test %d", i) + require.ErrorIs(t, err, io.EOF, "test %d", i) continue } - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) + } +} + +// TestCTParseFailures tests known failure edge cases, we know does not work due +// current OM spec limitations or clients with broken OM format. +// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. +func TestCTParseFailures(t *testing.T) { + for _, tcase := range []struct { + name string + input string + expected []parsedEntry + }{ + { + name: "_created line is a first one", + input: `# HELP thing histogram with _created as first line +# TYPE thing histogram +thing_created 1520872607.123 +thing_count 17 +thing_sum 324789.3 +thing_bucket{le="0.0"} 0 +thing_bucket{le="+Inf"} 17 +# HELP thing_c counter with _created as first line +# TYPE thing_c counter +thing_c_created 1520872607.123 +thing_c_total 14123.232 +# EOF +`, + expected: []parsedEntry{ + { + m: "thing", + help: "histogram with _created as first line", + }, + { + m: "thing", + typ: model.MetricTypeHistogram, + }, + { + m: `thing_count`, + ct: 0, // Should be int64p(1520872607123). + }, + { + m: `thing_sum`, + ct: 0, // Should be int64p(1520872607123). + }, + { + m: `thing_bucket{le="0.0"}`, + ct: 0, // Should be int64p(1520872607123). + }, + { + m: `thing_bucket{le="+Inf"}`, + ct: 0, // Should be int64p(1520872607123), + }, + { + m: "thing_c", + help: "counter with _created as first line", + }, + { + m: "thing_c", + typ: model.MetricTypeCounter, + }, + { + m: `thing_c_total`, + ct: 0, // Should be int64p(1520872607123). + }, + }, + }, + { + // TODO(bwplotka): Kind of correct bevaviour? If yes, let's move to the OK tests above. + name: "maybe counter with no meta", + input: `foo_total 17.0 +foo_created 1520872607.123 +foo_total{a="b"} 17.0 +foo_created{a="b"} 1520872608.123 +# EOF +`, + expected: []parsedEntry{ + { + m: `foo_total`, + }, + { + m: `foo_created`, + }, + { + m: `foo_total{a="b"}`, + }, + { + m: `foo_created{a="b"}`, + }, + }, + }, + } { + t.Run(fmt.Sprintf("case=%v", tcase.name), func(t *testing.T) { + p := NewOpenMetricsParser([]byte(tcase.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + got := testParse(t, p) + resetValAndLset(got) // Keep this test focused on metric, basic entries and CT only. + requireEntries(t, tcase.expected, got) + }) + } +} + +func resetValAndLset(e []parsedEntry) { + for i := range e { + e[i].v = 0 + e[i].lset = labels.EmptyLabels() } } diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index a611f3aea7..5ca61d1972 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/schema" ) type promlexer struct { @@ -160,16 +161,19 @@ type PromParser struct { // of the metric name and label names and values for this series. // p.offsets[0] is the start character of the metric name. // p.offsets[1] is the end of the metric name. - // Subsequently, p.offsets is a pair of pair of offsets for the positions + // Subsequently, p.offsets is a pair of offsets for the positions // of the label name and value start and end characters. offsets []int + + enableTypeAndUnitLabels bool } // NewPromParser returns a new parser of the byte slice. 
-func NewPromParser(b []byte, st *labels.SymbolTable) Parser {
+func NewPromParser(b []byte, st *labels.SymbolTable, enableTypeAndUnitLabels bool) Parser {
 	return &PromParser{
-		l:       &promlexer{b: append(b, '\n')},
-		builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
+		l:                       &promlexer{b: append(b, '\n')},
+		builder:                 labels.NewScratchBuilderWithSymbolTable(st, 16),
+		enableTypeAndUnitLabels: enableTypeAndUnitLabels,
 	}
 }
 
@@ -223,30 +227,43 @@ func (p *PromParser) Comment() []byte {
 	return p.text
 }
 
-// Metric writes the labels of the current sample into the passed labels.
-// It returns the string from which the metric was parsed.
-func (p *PromParser) Metric(l *labels.Labels) string {
-	// Copy the buffer to a string: this is only necessary for the return value.
+// Labels writes the labels of the current sample into the passed labels.
+func (p *PromParser) Labels(l *labels.Labels) {
+	// Defensive copy in case the following keeps a reference.
+	// See https://github.com/prometheus/prometheus/issues/16490
 	s := string(p.series)
-
 	p.builder.Reset()
 	metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start])
-	p.builder.Add(labels.MetricName, metricName)
 
+	m := schema.Metadata{
+		Name: metricName,
+		// NOTE(bwplotka): There is a known case where the type is wrong on a broken exposition
+		// (see the TestPromParse windspeed metric). Fixing it would require extra
+		// allocs and benchmarks. Since it was always broken, don't fix for now.
+		Type: p.mtype,
+	}
+
+	if p.enableTypeAndUnitLabels {
+		m.AddToLabels(&p.builder)
+	} else {
+		p.builder.Add(labels.MetricName, metricName)
+	}
 	for i := 2; i < len(p.offsets); i += 4 {
 		a := p.offsets[i] - p.start
 		b := p.offsets[i+1] - p.start
 		label := unreplace(s[a:b])
+		if p.enableTypeAndUnitLabels && !m.IsEmptyFor(label) {
+			// Drop user-provided metadata labels (e.g. __type__) that the parsed metadata already defines.
+			continue
+		}
 		c := p.offsets[i+2] - p.start
 		d := p.offsets[i+3] - p.start
-		value := unreplace(s[c:d])
+		value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
 
 		p.builder.Add(label, value)
 	}
 
 	p.builder.Sort()
 	*l = p.builder.Labels()
-
-	return s
 }
 
 // Exemplar implements the Parser interface. However, since the classic
@@ -256,10 +273,10 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool {
 	return false
 }
 
-// CreatedTimestamp returns nil as it's not implemented yet.
+// CreatedTimestamp returns 0 as it's not implemented yet.
 // TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
-func (p *PromParser) CreatedTimestamp() *int64 {
-	return nil
+func (p *PromParser) CreatedTimestamp() int64 {
+	return 0
 }
 
 // nextToken returns the next token from the promlexer. It skips over tabs
@@ -502,13 +519,17 @@ func unreplace(s string) string {
 }
 
 func yoloString(b []byte) string {
-	return *((*string)(unsafe.Pointer(&b)))
+	return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// yoloBytes converts a string to a byte slice without copying; the result must not be mutated.
+func yoloBytes(b string) []byte {
+	return unsafe.Slice(unsafe.StringData(b), len(b))
 }
 
 func parseFloat(s string) (float64, error) {
 	// Keep to pre-Go 1.13 float formats.
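+	// Go 1.13 extended strconv.ParseFloat to accept underscore digit
+	// separators and hexadecimal floats (p/P exponents); neither is valid
+	// in the Prometheus text exposition format, so reject them up front.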
 	if strings.ContainsAny(s, "pP_") {
-		return 0, fmt.Errorf("unsupported character in float")
+		return 0, errors.New("unsupported character in float")
 	}
 	return strconv.ParseFloat(s, 64)
 }
diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go
index 66986291d7..4e9406808f 100644
--- a/model/textparse/promparse_test.go
+++ b/model/textparse/promparse_test.go
@@ -14,33 +14,53 @@ package textparse
 
 import (
-	"bytes"
-	"errors"
+	"fmt"
 	"io"
-	"os"
 	"testing"
 
-	"github.com/klauspost/compress/gzip"
+	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
-	"github.com/prometheus/common/expfmt"
-	"github.com/prometheus/common/model"
-
-	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/util/testutil"
 )
 
-type expectedParse struct {
-	lset    labels.Labels
-	m       string
-	t       *int64
-	v       float64
-	typ     model.MetricType
-	help    string
-	unit    string
-	comment string
-	e       *exemplar.Exemplar
+// typeAndUnitLabels is a helper for the readability of the expectations.
+func typeAndUnitLabels(typeAndUnitEnabled bool, enabled, disabled labels.Labels) labels.Labels {
+	if typeAndUnitEnabled {
+		return enabled
+	}
+	return disabled
+}
+
+// todoDetectFamilySwitch exists because there's a known TODO that requires a dedicated PR and benchmarks for PROM-39.
+// OM and Prom text formats do NOT require TYPE, HELP or UNIT lines. This means that metric families can switch without
+// those metadata entries, e.g.:
+// ```
+// TYPE go_goroutines gauge
+// go_goroutines 33 # previous metric
+// different_metric_total 12 # <--- different family!
+// ```
+// The expected type for "different_metric_total" is obviously unknown type and unit, but it's surprisingly expensive and complex
+// to reliably write a parser for those cases. Two main issues:
+// a. TYPE and UNIT are associated with a "metric family", which is different from the resulting metric name (e.g. histograms).
+// b. You have to alloc additional entries to pair TYPE and UNIT with the metric families they refer to (nit).
+//
+// This problem is elevated for the PROM-39 feature.
+//
+// Current metadata handling is semi-broken here, as (a) is expensive and currently not fully accurate,
+// see: https://github.com/prometheus/prometheus/blob/dbf5d01a62249eddcd202303069f6cf7dd3c4a73/scrape/scrape.go#L1916
+//
+// To iterate, we keep it "knowingly" broken behind the feature flag.
+// TODO(bwplotka): Remove this once we fix the problematic case, e.g.:
+// - introduce a more accurate isSeriesPartOfFamily shared helper or even a parser method that tells when a new metric family starts
+func todoDetectFamilySwitch(typeAndUnitEnabled bool, expected labels.Labels, brokenTypeInherited model.MetricType) labels.Labels {
+	if typeAndUnitEnabled && brokenTypeInherited != model.MetricTypeUnknown {
+		// Hack for now.
+		b := labels.NewBuilder(expected)
+		b.Set("__type__", string(brokenTypeInherited))
+		return b.Labels()
+	}
+	return expected
 }
 
 func TestPromParse(t *testing.T) {
@@ -51,6 +71,13 @@ go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05
 go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
 go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05
 go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05
+# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
+# TYPE prometheus_http_request_duration_seconds histogram +prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 423 +prometheus_http_request_duration_seconds_bucket{handler="/",le="2"} 1423 +prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 1423 +prometheus_http_request_duration_seconds_sum{handler="/"} 2000 +prometheus_http_request_duration_seconds_count{handler="/"} 1423 # Hrandom comment starting with prefix of HELP # wind_speed{A="2",c="3"} 12345 @@ -68,188 +95,302 @@ some:aggregate:rate5m{a_b="c"} 1 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines 33 123123 +# TYPE some_counter_total counter +# HELP some_counter_total Help after type. +some_counter_total 12 +# HELP nohelp3 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 -testmetric{label="\"bar\""} 1` +testmetric{label="\"bar\""} 1 +testmetric{le="10"} 1 +# HELP type_and_unit_test1 Type specified in metadata overrides. +# TYPE type_and_unit_test1 gauge +type_and_unit_test1{__type__="counter"} 123 +# HELP type_and_unit_test2 Type specified in label. +type_and_unit_test2{__type__="counter"} 123` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" - int64p := func(x int64) *int64 { return &x } - - exp := []expectedParse{ - { - m: "go_gc_duration_seconds", - help: "A summary of the GC invocation durations.", - }, { - m: "go_gc_duration_seconds", - typ: model.MetricTypeSummary, - }, { - m: `go_gc_duration_seconds{quantile="0"}`, - v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), - }, { - m: `go_gc_duration_seconds{quantile="0.25",}`, - v: 7.424100000000001e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"), - }, { - m: `go_gc_duration_seconds{quantile="0.5",a="b"}`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"), - }, { - m: `go_gc_duration_seconds{quantile="0.8", a="b"}`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"), - }, { - m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"), - }, { - comment: "# Hrandom comment starting with prefix of HELP", - }, { - comment: "#", - }, { - m: `wind_speed{A="2",c="3"}`, - v: 12345, - lset: labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"), - }, { - comment: "# comment with escaped \\n newline", - }, { - comment: "# comment with escaped \\ escape character", - }, { - m: "nohelp1", - help: "", - }, { - m: "nohelp2", - help: "", - }, { - m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), - }, { - m: `go_gc_duration_seconds { quantile="1.0", a="b" }`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), - }, { - m: `go_gc_duration_seconds { quantile= "1.0", a= "b", }`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), - }, { - m: `go_gc_duration_seconds { quantile = "1.0", a = "b" }`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), - }, { - // NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. 
This appears to be unintended and should probably be fixed. - m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`, - v: 8.3835e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"), - }, { - m: `go_gc_duration_seconds_count`, - v: 99, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"), - }, { - m: `some:aggregate:rate5m{a_b="c"}`, - v: 1, - lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), - }, { - m: "go_goroutines", - help: "Number of goroutines that currently exist.", - }, { - m: "go_goroutines", - typ: model.MetricTypeGauge, - }, { - m: `go_goroutines`, - v: 33, - t: int64p(123123), - lset: labels.FromStrings("__name__", "go_goroutines"), - }, { - m: "_metric_starting_with_underscore", - v: 1, - lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"), - }, { - m: "testmetric{_label_starting_with_underscore=\"foo\"}", - v: 1, - lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), - }, { - m: "testmetric{label=\"\\\"bar\\\"\"}", - v: 1, - lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), - }, { - m: "metric", - help: "foo\x00bar", - }, { - m: "null_byte_metric{a=\"abc\x00\"}", - v: 1, - lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), - }, - } - - p := NewPromParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) -} - -func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { - i := 0 - - var res labels.Labels - - for { - et, err := p.Next() - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - - switch et { - case EntrySeries: - m, ts, v := p.Series() - - p.Metric(&res) - - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].t, ts) - require.Equal(t, exp[i].v, v) - testutil.RequireEqual(t, exp[i].lset, res) - - var e exemplar.Exemplar - found := p.Exemplar(&e) - if exp[i].e == nil { - require.False(t, found) - } else { - require.True(t, found) - testutil.RequireEqual(t, *exp[i].e, e) + for _, typeAndUnitEnabled := range []bool{false, true} { + t.Run(fmt.Sprintf("type-and-unit=%v", typeAndUnitEnabled), func(t *testing.T) { + exp := []parsedEntry{ + { + m: "go_gc_duration_seconds", + help: "A summary of the GC invocation durations.", + }, + { + m: "go_gc_duration_seconds", + typ: model.MetricTypeSummary, + }, + { + m: `go_gc_duration_seconds{quantile="0"}`, + v: 4.9351e-05, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.0"), + labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), + ), + }, + { + m: `go_gc_duration_seconds{quantile="0.25",}`, + v: 7.424100000000001e-05, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.25"), + labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"), + ), + }, + { + m: `go_gc_duration_seconds{quantile="0.5",a="b"}`, + v: 8.3835e-05, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.5", "a", "b"), + labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"), + ), + }, + { + m: `go_gc_duration_seconds{quantile="0.8", a="b"}`, + v: 8.3835e-05, + lset: typeAndUnitLabels( + 
typeAndUnitEnabled,
+						labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.8", "a", "b"),
+						labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"),
+					),
+				},
+				{
+					m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
+					v: 8.3835e-05,
+					lset: typeAndUnitLabels(
+						typeAndUnitEnabled,
+						labels.FromStrings("__name__", "go_gc_duration_seconds", "__type__", string(model.MetricTypeSummary), "quantile", "0.9", "a", "b"),
+						labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
+					),
+				},
+				{
+					m:    "prometheus_http_request_duration_seconds",
+					help: "Histogram of latencies for HTTP requests.",
+				},
+				{
+					m:   "prometheus_http_request_duration_seconds",
+					typ: model.MetricTypeHistogram,
+				},
+				{
+					m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`,
+					v: 423,
+					lset: typeAndUnitLabels(
+						typeAndUnitEnabled,
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "1.0"),
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"),
+					),
+				},
+				{
+					m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`,
+					v: 1423,
+					lset: typeAndUnitLabels(
+						typeAndUnitEnabled,
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "2.0"),
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"),
+					),
+				},
+				{
+					m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`,
+					v: 1423,
+					lset: typeAndUnitLabels(
+						typeAndUnitEnabled,
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "__type__", string(model.MetricTypeHistogram), "handler", "/", "le", "+Inf"),
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"),
+					),
+				},
+				{
+					m: `prometheus_http_request_duration_seconds_sum{handler="/"}`,
+					v: 2000,
+					lset: typeAndUnitLabels(
+						typeAndUnitEnabled,
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "__type__", string(model.MetricTypeHistogram), "handler", "/"),
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"),
+					),
+				},
+				{
+					m: `prometheus_http_request_duration_seconds_count{handler="/"}`,
+					v: 1423,
+					lset: typeAndUnitLabels(
+						typeAndUnitEnabled,
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "__type__", string(model.MetricTypeHistogram), "handler", "/"),
+						labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"),
+					),
+				},
+				{
+					comment: "# Hrandom comment starting with prefix of HELP",
+				},
+				{
+					comment: "#",
+				},
+				{
+					m: `wind_speed{A="2",c="3"}`,
+					v: 12345,
+					lset: typeAndUnitLabels(
+						typeAndUnitEnabled,
+						// NOTE(bwplotka): This is knowingly broken: the old type is inherited when TYPE was not specified for a new metric.
+						// This has always been broken, and only for an already broken exposition. Don't fix for now (expensive).
+ labels.FromStrings("A", "2", "__name__", "wind_speed", "__type__", string(model.MetricTypeHistogram), "c", "3"), + labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"), + ), + }, + { + comment: "# comment with escaped \\n newline", + }, + { + comment: "# comment with escaped \\ escape character", + }, + { + m: "nohelp1", + help: "", + }, + { + m: "nohelp2", + help: "", + }, + { + m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`, + v: 8.3835e-05, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram), + }, + { + m: `go_gc_duration_seconds { quantile="1.0", a="b" }`, + v: 8.3835e-05, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram), + }, + { + m: `go_gc_duration_seconds { quantile= "1.0", a= "b", }`, + v: 8.3835e-05, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram), + }, + { + m: `go_gc_duration_seconds { quantile = "1.0", a = "b" }`, + v: 8.3835e-05, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), model.MetricTypeHistogram), + }, + { + // NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed. + m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`, + v: 8.3835e-05, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"), model.MetricTypeHistogram), + }, + { + m: `go_gc_duration_seconds_count`, + v: 99, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "go_gc_duration_seconds_count"), model.MetricTypeHistogram), + }, + { + m: `some:aggregate:rate5m{a_b="c"}`, + v: 1, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), model.MetricTypeHistogram), + }, + { + m: "go_goroutines", + help: "Number of goroutines that currently exist.", + }, + { + m: "go_goroutines", + typ: model.MetricTypeGauge, + }, + { + m: `go_goroutines`, + v: 33, + t: int64p(123123), + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "go_goroutines", "__type__", string(model.MetricTypeGauge)), + labels.FromStrings("__name__", "go_goroutines"), + ), + }, + { + m: "some_counter_total", + typ: model.MetricTypeCounter, + }, + { + m: "some_counter_total", + help: "Help after type.", + }, + { + m: `some_counter_total`, + v: 12, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "some_counter_total", "__type__", string(model.MetricTypeCounter)), + labels.FromStrings("__name__", "some_counter_total"), + ), + }, + { + m: "nohelp3", + help: "", + }, + { + m: "_metric_starting_with_underscore", + v: 1, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "_metric_starting_with_underscore"), model.MetricTypeCounter), + }, + { + m: "testmetric{_label_starting_with_underscore=\"foo\"}", + v: 1, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"), model.MetricTypeCounter), + }, + { + m: "testmetric{label=\"\\\"bar\\\"\"}", + v: 1, + lset: 
todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "label", `"bar"`), model.MetricTypeCounter), + }, + { + m: `testmetric{le="10"}`, + v: 1, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "testmetric", "le", "10"), model.MetricTypeCounter), + }, + { + m: "type_and_unit_test1", + help: "Type specified in metadata overrides.", + }, + { + m: "type_and_unit_test1", + typ: model.MetricTypeGauge, + }, + { + m: "type_and_unit_test1{__type__=\"counter\"}", + v: 123, + lset: typeAndUnitLabels( + typeAndUnitEnabled, + labels.FromStrings("__name__", "type_and_unit_test1", "__type__", string(model.MetricTypeGauge)), + labels.FromStrings("__name__", "type_and_unit_test1", "__type__", string(model.MetricTypeCounter)), + ), + }, + { + m: "type_and_unit_test2", + help: "Type specified in label.", + }, + { + m: "type_and_unit_test2{__type__=\"counter\"}", + v: 123, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "type_and_unit_test2", "__type__", string(model.MetricTypeCounter)), model.MetricTypeGauge), + }, + { + m: "metric", + help: "foo\x00bar", + }, + { + m: "null_byte_metric{a=\"abc\x00\"}", + v: 1, + lset: todoDetectFamilySwitch(typeAndUnitEnabled, labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"), model.MetricTypeGauge), + }, } - case EntryType: - m, typ := p.Type() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].typ, typ) - - case EntryHelp: - m, h := p.Help() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].help, string(h)) - - case EntryUnit: - m, u := p.Unit() - require.Equal(t, exp[i].m, string(m)) - require.Equal(t, exp[i].unit, string(u)) - - case EntryComment: - require.Equal(t, exp[i].comment, string(p.Comment())) - } - - i++ + p := NewPromParser([]byte(input), labels.NewSymbolTable(), typeAndUnitEnabled) + got := testParse(t, p) + requireEntries(t, exp, got) + }) } - require.Len(t, exp, i) } func TestUTF8PromParse(t *testing.T) { - oldValidationScheme := model.NameValidationScheme - model.NameValidationScheme = model.UTF8Validation - defer func() { - model.NameValidationScheme = oldValidationScheme - }() - input := `# HELP "go.gc_duration_seconds" A summary of the GC invocation durations. 
# TYPE "go.gc_duration_seconds" summary {"go.gc_duration_seconds",quantile="0"} 4.9351e-05 @@ -264,7 +405,7 @@ func TestUTF8PromParse(t *testing.T) { {"go.gc_duration_seconds_count"} 99 {"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0` - exp := []expectedParse{ + exp := []parsedEntry{ { m: "go.gc_duration_seconds", help: "A summary of the GC invocation durations.", @@ -274,7 +415,7 @@ func TestUTF8PromParse(t *testing.T) { }, { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"), }, { m: `{"go.gc_duration_seconds",quantile="0.25",}`, v: 7.424100000000001e-05, @@ -319,8 +460,9 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"), }, } - p := NewPromParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + p := NewPromParser([]byte(input), labels.NewSymbolTable(), false) + got := testParse(t, p) + requireEntries(t, exp, got) } func TestPromParseErrors(t *testing.T) { @@ -399,13 +541,12 @@ func TestPromParseErrors(t *testing.T) { } for i, c := range cases { - p := NewPromParser([]byte(c.input), labels.NewSymbolTable()) + p := NewPromParser([]byte(c.input), labels.NewSymbolTable(), false) var err error for err == nil { _, err = p.Next() } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) + require.EqualError(t, err, c.err, "test %d", i) } } @@ -453,7 +594,7 @@ func TestPromNullByteHandling(t *testing.T) { } for i, c := range cases { - p := NewPromParser([]byte(c.input), labels.NewSymbolTable()) + p := NewPromParser([]byte(c.input), labels.NewSymbolTable(), false) var err error for err == nil { _, err = p.Next() @@ -464,192 +605,6 @@ func TestPromNullByteHandling(t *testing.T) { continue } - require.Error(t, err) - require.Equal(t, c.err, err.Error(), "test %d", i) - } -} - -const ( - promtestdataSampleCount = 410 -) - -func BenchmarkParse(b *testing.B) { - for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{ - "prometheus": NewPromParser, - "openmetrics": NewOpenMetricsParser, - } { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - buf, err := io.ReadAll(f) - require.NoError(b, err) - - b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run(parserName+"/decode-metric/"+fn, func(b *testing.B) { - total := 0 - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - var res labels.Labels - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - 
b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) { - total := 0 - var res labels.Labels - - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i += promtestdataSampleCount { - p := parser(buf, st) - - Outer: - for i < b.N { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Outer - } - b.Fatal(err) - case EntrySeries: - m, _, _ := p.Series() - - p.Metric(&res) - - total += len(m) - i++ - } - } - } - _ = total - }) - b.Run("expfmt-text/"+fn, func(b *testing.B) { - if parserName != "prometheus" { - b.Skip() - } - b.SetBytes(int64(len(buf) / promtestdataSampleCount)) - b.ReportAllocs() - b.ResetTimer() - - total := 0 - - for i := 0; i < b.N; i += promtestdataSampleCount { - decSamples := make(model.Vector, 0, 50) - sdec := expfmt.SampleDecoder{ - Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(expfmt.TypeTextPlain)), - Opts: &expfmt.DecodeOptions{ - Timestamp: model.TimeFromUnixNano(0), - }, - } - - for { - if err = sdec.Decode(&decSamples); err != nil { - break - } - total += len(decSamples) - decSamples = decSamples[:0] - } - } - _ = total - }) - } - } -} - -func BenchmarkGzip(b *testing.B) { - for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { - b.Run(fn, func(b *testing.B) { - f, err := os.Open(fn) - require.NoError(b, err) - defer f.Close() - - var buf bytes.Buffer - gw := gzip.NewWriter(&buf) - - n, err := io.Copy(gw, f) - require.NoError(b, err) - require.NoError(b, gw.Close()) - - gbuf, err := io.ReadAll(&buf) - require.NoError(b, err) - - k := b.N / promtestdataSampleCount - - b.ReportAllocs() - b.SetBytes(n / promtestdataSampleCount) - b.ResetTimer() - - total := 0 - - for i := 0; i < k; i++ { - gr, err := gzip.NewReader(bytes.NewReader(gbuf)) - require.NoError(b, err) - - d, err := io.ReadAll(gr) - require.NoError(b, err) - require.NoError(b, gr.Close()) - - total += len(d) - } - _ = total - }) + require.EqualError(t, err, c.err, "test %d", i) } } diff --git a/model/textparse/promtestdata.nometa.txt b/model/textparse/promtestdata.nometa.txt deleted file mode 100644 index 235f0aa464..0000000000 --- a/model/textparse/promtestdata.nometa.txt +++ /dev/null @@ -1,411 +0,0 @@ -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -go_goroutines 33 -go_memstats_alloc_bytes 1.7518624e+07 -go_memstats_alloc_bytes_total 8.3062296e+08 -go_memstats_buck_hash_sys_bytes 1.494637e+06 -go_memstats_frees_total 4.65658e+06 -go_memstats_gc_sys_bytes 1.107968e+06 -go_memstats_heap_alloc_bytes 1.7518624e+07 -go_memstats_heap_idle_bytes 6.668288e+06 -go_memstats_heap_inuse_bytes 1.8956288e+07 -go_memstats_heap_objects 72755 -go_memstats_heap_released_bytes_total 0 -go_memstats_heap_sys_bytes 2.5624576e+07 -go_memstats_last_gc_time_seconds 1.4843955586166437e+09 -go_memstats_lookups_total 2089 -go_memstats_mallocs_total 4.729335e+06 -go_memstats_mcache_inuse_bytes 9600 -go_memstats_mcache_sys_bytes 16384 -go_memstats_mspan_inuse_bytes 211520 -go_memstats_mspan_sys_bytes 245760 -go_memstats_next_gc_bytes 2.033527e+07 -go_memstats_other_sys_bytes 2.077323e+06 -go_memstats_stack_inuse_bytes 1.6384e+06 -go_memstats_stack_sys_bytes 
1.6384e+06 -go_memstats_sys_bytes 3.2205048e+07 -http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="alerts"} 0 -http_request_duration_microseconds_count{handler="alerts"} 0 -http_request_duration_microseconds{handler="config",quantile="0.5"} NaN -http_request_duration_microseconds{handler="config",quantile="0.9"} NaN -http_request_duration_microseconds{handler="config",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="config"} 0 -http_request_duration_microseconds_count{handler="config"} 0 -http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="consoles"} 0 -http_request_duration_microseconds_count{handler="consoles"} 0 -http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="drop_series"} 0 -http_request_duration_microseconds_count{handler="drop_series"} 0 -http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="federate"} 0 -http_request_duration_microseconds_count{handler="federate"} 0 -http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="flags"} 0 -http_request_duration_microseconds_count{handler="flags"} 0 -http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 -http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 -http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 -http_request_duration_microseconds_sum{handler="graph"} 5803.93 -http_request_duration_microseconds_count{handler="graph"} 3 -http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="heap"} 0 -http_request_duration_microseconds_count{handler="heap"} 0 -http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 -http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 -http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 -http_request_duration_microseconds_sum{handler="label_values"} 3995.574 -http_request_duration_microseconds_count{handler="label_values"} 3 -http_request_duration_microseconds{handler="options",quantile="0.5"} NaN -http_request_duration_microseconds{handler="options",quantile="0.9"} NaN -http_request_duration_microseconds{handler="options",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="options"} 0 
-http_request_duration_microseconds_count{handler="options"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 -http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 -http_request_duration_microseconds_count{handler="prometheus"} 462 -http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 -http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 -http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 -http_request_duration_microseconds_sum{handler="query"} 26074.11 -http_request_duration_microseconds_count{handler="query"} 6 -http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="query_range"} 0 -http_request_duration_microseconds_count{handler="query_range"} 0 -http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="rules"} 0 -http_request_duration_microseconds_count{handler="rules"} 0 -http_request_duration_microseconds{handler="series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="series"} 0 -http_request_duration_microseconds_count{handler="series"} 0 -http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 -http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 -http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 -http_request_duration_microseconds_sum{handler="static"} 6458.621 -http_request_duration_microseconds_count{handler="static"} 3 -http_request_duration_microseconds{handler="status",quantile="0.5"} NaN -http_request_duration_microseconds{handler="status",quantile="0.9"} NaN -http_request_duration_microseconds{handler="status",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="status"} 0 -http_request_duration_microseconds_count{handler="status"} 0 -http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="targets"} 0 -http_request_duration_microseconds_count{handler="targets"} 0 -http_request_duration_microseconds{handler="version",quantile="0.5"} NaN -http_request_duration_microseconds{handler="version",quantile="0.9"} NaN -http_request_duration_microseconds{handler="version",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="version"} 0 -http_request_duration_microseconds_count{handler="version"} 0 -http_request_size_bytes{handler="alerts",quantile="0.5"} NaN -http_request_size_bytes{handler="alerts",quantile="0.9"} NaN -http_request_size_bytes{handler="alerts",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="alerts"} 0 -http_request_size_bytes_count{handler="alerts"} 0 
-http_request_size_bytes{handler="config",quantile="0.5"} NaN -http_request_size_bytes{handler="config",quantile="0.9"} NaN -http_request_size_bytes{handler="config",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="config"} 0 -http_request_size_bytes_count{handler="config"} 0 -http_request_size_bytes{handler="consoles",quantile="0.5"} NaN -http_request_size_bytes{handler="consoles",quantile="0.9"} NaN -http_request_size_bytes{handler="consoles",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="consoles"} 0 -http_request_size_bytes_count{handler="consoles"} 0 -http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="drop_series"} 0 -http_request_size_bytes_count{handler="drop_series"} 0 -http_request_size_bytes{handler="federate",quantile="0.5"} NaN -http_request_size_bytes{handler="federate",quantile="0.9"} NaN -http_request_size_bytes{handler="federate",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="federate"} 0 -http_request_size_bytes_count{handler="federate"} 0 -http_request_size_bytes{handler="flags",quantile="0.5"} NaN -http_request_size_bytes{handler="flags",quantile="0.9"} NaN -http_request_size_bytes{handler="flags",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="flags"} 0 -http_request_size_bytes_count{handler="flags"} 0 -http_request_size_bytes{handler="graph",quantile="0.5"} 367 -http_request_size_bytes{handler="graph",quantile="0.9"} 389 -http_request_size_bytes{handler="graph",quantile="0.99"} 389 -http_request_size_bytes_sum{handler="graph"} 1145 -http_request_size_bytes_count{handler="graph"} 3 -http_request_size_bytes{handler="heap",quantile="0.5"} NaN -http_request_size_bytes{handler="heap",quantile="0.9"} NaN -http_request_size_bytes{handler="heap",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="heap"} 0 -http_request_size_bytes_count{handler="heap"} 0 -http_request_size_bytes{handler="label_values",quantile="0.5"} 416 -http_request_size_bytes{handler="label_values",quantile="0.9"} 416 -http_request_size_bytes{handler="label_values",quantile="0.99"} 416 -http_request_size_bytes_sum{handler="label_values"} 1248 -http_request_size_bytes_count{handler="label_values"} 3 -http_request_size_bytes{handler="options",quantile="0.5"} NaN -http_request_size_bytes{handler="options",quantile="0.9"} NaN -http_request_size_bytes{handler="options",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="options"} 0 -http_request_size_bytes_count{handler="options"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 -http_request_size_bytes_sum{handler="prometheus"} 109956 -http_request_size_bytes_count{handler="prometheus"} 462 -http_request_size_bytes{handler="query",quantile="0.5"} 531 -http_request_size_bytes{handler="query",quantile="0.9"} 531 -http_request_size_bytes{handler="query",quantile="0.99"} 531 -http_request_size_bytes_sum{handler="query"} 3186 -http_request_size_bytes_count{handler="query"} 6 -http_request_size_bytes{handler="query_range",quantile="0.5"} NaN -http_request_size_bytes{handler="query_range",quantile="0.9"} NaN -http_request_size_bytes{handler="query_range",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="query_range"} 0 
-http_request_size_bytes_count{handler="query_range"} 0 -http_request_size_bytes{handler="rules",quantile="0.5"} NaN -http_request_size_bytes{handler="rules",quantile="0.9"} NaN -http_request_size_bytes{handler="rules",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="rules"} 0 -http_request_size_bytes_count{handler="rules"} 0 -http_request_size_bytes{handler="series",quantile="0.5"} NaN -http_request_size_bytes{handler="series",quantile="0.9"} NaN -http_request_size_bytes{handler="series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="series"} 0 -http_request_size_bytes_count{handler="series"} 0 -http_request_size_bytes{handler="static",quantile="0.5"} 379 -http_request_size_bytes{handler="static",quantile="0.9"} 379 -http_request_size_bytes{handler="static",quantile="0.99"} 379 -http_request_size_bytes_sum{handler="static"} 1137 -http_request_size_bytes_count{handler="static"} 3 -http_request_size_bytes{handler="status",quantile="0.5"} NaN -http_request_size_bytes{handler="status",quantile="0.9"} NaN -http_request_size_bytes{handler="status",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="status"} 0 -http_request_size_bytes_count{handler="status"} 0 -http_request_size_bytes{handler="targets",quantile="0.5"} NaN -http_request_size_bytes{handler="targets",quantile="0.9"} NaN -http_request_size_bytes{handler="targets",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="targets"} 0 -http_request_size_bytes_count{handler="targets"} 0 -http_request_size_bytes{handler="version",quantile="0.5"} NaN -http_request_size_bytes{handler="version",quantile="0.9"} NaN -http_request_size_bytes{handler="version",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="version"} 0 -http_request_size_bytes_count{handler="version"} 0 -http_requests_total{code="200",handler="graph",method="get"} 3 -http_requests_total{code="200",handler="label_values",method="get"} 3 -http_requests_total{code="200",handler="prometheus",method="get"} 462 -http_requests_total{code="200",handler="query",method="get"} 6 -http_requests_total{code="200",handler="static",method="get"} 3 -http_response_size_bytes{handler="alerts",quantile="0.5"} NaN -http_response_size_bytes{handler="alerts",quantile="0.9"} NaN -http_response_size_bytes{handler="alerts",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="alerts"} 0 -http_response_size_bytes_count{handler="alerts"} 0 -http_response_size_bytes{handler="config",quantile="0.5"} NaN -http_response_size_bytes{handler="config",quantile="0.9"} NaN -http_response_size_bytes{handler="config",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="config"} 0 -http_response_size_bytes_count{handler="config"} 0 -http_response_size_bytes{handler="consoles",quantile="0.5"} NaN -http_response_size_bytes{handler="consoles",quantile="0.9"} NaN -http_response_size_bytes{handler="consoles",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="consoles"} 0 -http_response_size_bytes_count{handler="consoles"} 0 -http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="drop_series"} 0 -http_response_size_bytes_count{handler="drop_series"} 0 -http_response_size_bytes{handler="federate",quantile="0.5"} NaN -http_response_size_bytes{handler="federate",quantile="0.9"} NaN -http_response_size_bytes{handler="federate",quantile="0.99"} NaN 
-http_response_size_bytes_sum{handler="federate"} 0 -http_response_size_bytes_count{handler="federate"} 0 -http_response_size_bytes{handler="flags",quantile="0.5"} NaN -http_response_size_bytes{handler="flags",quantile="0.9"} NaN -http_response_size_bytes{handler="flags",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="flags"} 0 -http_response_size_bytes_count{handler="flags"} 0 -http_response_size_bytes{handler="graph",quantile="0.5"} 3619 -http_response_size_bytes{handler="graph",quantile="0.9"} 3619 -http_response_size_bytes{handler="graph",quantile="0.99"} 3619 -http_response_size_bytes_sum{handler="graph"} 10857 -http_response_size_bytes_count{handler="graph"} 3 -http_response_size_bytes{handler="heap",quantile="0.5"} NaN -http_response_size_bytes{handler="heap",quantile="0.9"} NaN -http_response_size_bytes{handler="heap",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="heap"} 0 -http_response_size_bytes_count{handler="heap"} 0 -http_response_size_bytes{handler="label_values",quantile="0.5"} 642 -http_response_size_bytes{handler="label_values",quantile="0.9"} 642 -http_response_size_bytes{handler="label_values",quantile="0.99"} 642 -http_response_size_bytes_sum{handler="label_values"} 1926 -http_response_size_bytes_count{handler="label_values"} 3 -http_response_size_bytes{handler="options",quantile="0.5"} NaN -http_response_size_bytes{handler="options",quantile="0.9"} NaN -http_response_size_bytes{handler="options",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="options"} 0 -http_response_size_bytes_count{handler="options"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 -http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 -http_response_size_bytes_count{handler="prometheus"} 462 -http_response_size_bytes{handler="query",quantile="0.5"} 776 -http_response_size_bytes{handler="query",quantile="0.9"} 781 -http_response_size_bytes{handler="query",quantile="0.99"} 781 -http_response_size_bytes_sum{handler="query"} 4656 -http_response_size_bytes_count{handler="query"} 6 -http_response_size_bytes{handler="query_range",quantile="0.5"} NaN -http_response_size_bytes{handler="query_range",quantile="0.9"} NaN -http_response_size_bytes{handler="query_range",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="query_range"} 0 -http_response_size_bytes_count{handler="query_range"} 0 -http_response_size_bytes{handler="rules",quantile="0.5"} NaN -http_response_size_bytes{handler="rules",quantile="0.9"} NaN -http_response_size_bytes{handler="rules",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="rules"} 0 -http_response_size_bytes_count{handler="rules"} 0 -http_response_size_bytes{handler="series",quantile="0.5"} NaN -http_response_size_bytes{handler="series",quantile="0.9"} NaN -http_response_size_bytes{handler="series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="series"} 0 -http_response_size_bytes_count{handler="series"} 0 -http_response_size_bytes{handler="static",quantile="0.5"} 6316 -http_response_size_bytes{handler="static",quantile="0.9"} 6316 -http_response_size_bytes{handler="static",quantile="0.99"} 6316 -http_response_size_bytes_sum{handler="static"} 18948 -http_response_size_bytes_count{handler="static"} 3 -http_response_size_bytes{handler="status",quantile="0.5"} NaN -http_response_size_bytes{handler="status",quantile="0.9"} NaN 
-http_response_size_bytes{handler="status",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="status"} 0 -http_response_size_bytes_count{handler="status"} 0 -http_response_size_bytes{handler="targets",quantile="0.5"} NaN -http_response_size_bytes{handler="targets",quantile="0.9"} NaN -http_response_size_bytes{handler="targets",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="targets"} 0 -http_response_size_bytes_count{handler="targets"} 0 -http_response_size_bytes{handler="version",quantile="0.5"} NaN -http_response_size_bytes{handler="version",quantile="0.9"} NaN -http_response_size_bytes{handler="version",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="version"} 0 -http_response_size_bytes_count{handler="version"} 0 -prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 -prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 -prometheus_config_last_reload_successful 1 -prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_count 1 -prometheus_evaluator_iterations_skipped_total 0 -prometheus_notifications_dropped_total 0 -prometheus_notifications_queue_capacity 10000 -prometheus_notifications_queue_length 0 -prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 -prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 -prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_azure_refresh_duration_seconds_sum 0 -prometheus_sd_azure_refresh_duration_seconds_count 0 -prometheus_sd_azure_refresh_failures_total 0 -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_failures_total 0 -prometheus_sd_dns_lookup_failures_total 0 -prometheus_sd_dns_lookups_total 0 -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_ec2_refresh_duration_seconds_sum 0 -prometheus_sd_ec2_refresh_duration_seconds_count 0 -prometheus_sd_ec2_refresh_failures_total 0 -prometheus_sd_file_read_errors_total 
0
-prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_file_scan_duration_seconds_sum 0
-prometheus_sd_file_scan_duration_seconds_count 0
-prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
-prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
-prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
-prometheus_sd_gce_refresh_duration_sum 0
-prometheus_sd_gce_refresh_duration_count 0
-prometheus_sd_gce_refresh_failures_total 0
-prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_marathon_refresh_duration_seconds_sum 0
-prometheus_sd_marathon_refresh_duration_seconds_count 0
-prometheus_sd_marathon_refresh_failures_total 0
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
-prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
-prometheus_target_interval_length_seconds_count{interval="50ms"} 685
-prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
-prometheus_target_skipped_scrapes_total 0
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
-prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
-prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
-prometheus_treecache_watcher_goroutines 0
-prometheus_treecache_zookeeper_failures_total 0
-# EOF
diff --git a/model/textparse/promtestdata.txt b/model/textparse/promtestdata.txt
deleted file mode 100644
index 174f383e91..0000000000
--- a/model/textparse/promtestdata.txt
+++ /dev/null
@@ -1,529 +0,0 @@
-# HELP go_gc_duration_seconds A summary of the GC
invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 33 -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. -# TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 1.7518624e+07 -# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. -# TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 8.3062296e+08 -# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. -# TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.494637e+06 -# HELP go_memstats_frees_total Total number of frees. -# TYPE go_memstats_frees_total counter -go_memstats_frees_total 4.65658e+06 -# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. -# TYPE go_memstats_gc_sys_bytes gauge -go_memstats_gc_sys_bytes 1.107968e+06 -# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. -# TYPE go_memstats_heap_alloc_bytes gauge -go_memstats_heap_alloc_bytes 1.7518624e+07 -# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. -# TYPE go_memstats_heap_idle_bytes gauge -go_memstats_heap_idle_bytes 6.668288e+06 -# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. -# TYPE go_memstats_heap_inuse_bytes gauge -go_memstats_heap_inuse_bytes 1.8956288e+07 -# HELP go_memstats_heap_objects Number of allocated objects. -# TYPE go_memstats_heap_objects gauge -go_memstats_heap_objects 72755 -# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. -# TYPE go_memstats_heap_released_bytes_total counter -go_memstats_heap_released_bytes_total 0 -# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. -# TYPE go_memstats_heap_sys_bytes gauge -go_memstats_heap_sys_bytes 2.5624576e+07 -# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. -# TYPE go_memstats_last_gc_time_seconds gauge -go_memstats_last_gc_time_seconds 1.4843955586166437e+09 -# HELP go_memstats_lookups_total Total number of pointer lookups. -# TYPE go_memstats_lookups_total counter -go_memstats_lookups_total 2089 -# HELP go_memstats_mallocs_total Total number of mallocs. -# TYPE go_memstats_mallocs_total counter -go_memstats_mallocs_total 4.729335e+06 -# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. -# TYPE go_memstats_mcache_inuse_bytes gauge -go_memstats_mcache_inuse_bytes 9600 -# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. -# TYPE go_memstats_mcache_sys_bytes gauge -go_memstats_mcache_sys_bytes 16384 -# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. -# TYPE go_memstats_mspan_inuse_bytes gauge -go_memstats_mspan_inuse_bytes 211520 -# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. 
-# TYPE go_memstats_mspan_sys_bytes gauge -go_memstats_mspan_sys_bytes 245760 -# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. -# TYPE go_memstats_next_gc_bytes gauge -go_memstats_next_gc_bytes 2.033527e+07 -# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. -# TYPE go_memstats_other_sys_bytes gauge -go_memstats_other_sys_bytes 2.077323e+06 -# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. -# TYPE go_memstats_stack_inuse_bytes gauge -go_memstats_stack_inuse_bytes 1.6384e+06 -# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. -# TYPE go_memstats_stack_sys_bytes gauge -go_memstats_stack_sys_bytes 1.6384e+06 -# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. -# TYPE go_memstats_sys_bytes gauge -go_memstats_sys_bytes 3.2205048e+07 -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. -# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="alerts"} 0 -http_request_duration_microseconds_count{handler="alerts"} 0 -http_request_duration_microseconds{handler="config",quantile="0.5"} NaN -http_request_duration_microseconds{handler="config",quantile="0.9"} NaN -http_request_duration_microseconds{handler="config",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="config"} 0 -http_request_duration_microseconds_count{handler="config"} 0 -http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="consoles"} 0 -http_request_duration_microseconds_count{handler="consoles"} 0 -http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="drop_series"} 0 -http_request_duration_microseconds_count{handler="drop_series"} 0 -http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="federate"} 0 -http_request_duration_microseconds_count{handler="federate"} 0 -http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="flags"} 0 -http_request_duration_microseconds_count{handler="flags"} 0 -http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 -http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 -http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 -http_request_duration_microseconds_sum{handler="graph"} 5803.93 -http_request_duration_microseconds_count{handler="graph"} 3 
-http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="heap"} 0 -http_request_duration_microseconds_count{handler="heap"} 0 -http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 -http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 -http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 -http_request_duration_microseconds_sum{handler="label_values"} 3995.574 -http_request_duration_microseconds_count{handler="label_values"} 3 -http_request_duration_microseconds{handler="options",quantile="0.5"} NaN -http_request_duration_microseconds{handler="options",quantile="0.9"} NaN -http_request_duration_microseconds{handler="options",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="options"} 0 -http_request_duration_microseconds_count{handler="options"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 -http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 -http_request_duration_microseconds_count{handler="prometheus"} 462 -http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 -http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 -http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 -http_request_duration_microseconds_sum{handler="query"} 26074.11 -http_request_duration_microseconds_count{handler="query"} 6 -http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="query_range"} 0 -http_request_duration_microseconds_count{handler="query_range"} 0 -http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="rules"} 0 -http_request_duration_microseconds_count{handler="rules"} 0 -http_request_duration_microseconds{handler="series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="series"} 0 -http_request_duration_microseconds_count{handler="series"} 0 -http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 -http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 -http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 -http_request_duration_microseconds_sum{handler="static"} 6458.621 -http_request_duration_microseconds_count{handler="static"} 3 -http_request_duration_microseconds{handler="status",quantile="0.5"} NaN -http_request_duration_microseconds{handler="status",quantile="0.9"} NaN -http_request_duration_microseconds{handler="status",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="status"} 0 
-http_request_duration_microseconds_count{handler="status"} 0 -http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="targets"} 0 -http_request_duration_microseconds_count{handler="targets"} 0 -http_request_duration_microseconds{handler="version",quantile="0.5"} NaN -http_request_duration_microseconds{handler="version",quantile="0.9"} NaN -http_request_duration_microseconds{handler="version",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="version"} 0 -http_request_duration_microseconds_count{handler="version"} 0 -# HELP http_request_size_bytes The HTTP request sizes in bytes. -# TYPE http_request_size_bytes summary -http_request_size_bytes{handler="alerts",quantile="0.5"} NaN -http_request_size_bytes{handler="alerts",quantile="0.9"} NaN -http_request_size_bytes{handler="alerts",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="alerts"} 0 -http_request_size_bytes_count{handler="alerts"} 0 -http_request_size_bytes{handler="config",quantile="0.5"} NaN -http_request_size_bytes{handler="config",quantile="0.9"} NaN -http_request_size_bytes{handler="config",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="config"} 0 -http_request_size_bytes_count{handler="config"} 0 -http_request_size_bytes{handler="consoles",quantile="0.5"} NaN -http_request_size_bytes{handler="consoles",quantile="0.9"} NaN -http_request_size_bytes{handler="consoles",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="consoles"} 0 -http_request_size_bytes_count{handler="consoles"} 0 -http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="drop_series"} 0 -http_request_size_bytes_count{handler="drop_series"} 0 -http_request_size_bytes{handler="federate",quantile="0.5"} NaN -http_request_size_bytes{handler="federate",quantile="0.9"} NaN -http_request_size_bytes{handler="federate",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="federate"} 0 -http_request_size_bytes_count{handler="federate"} 0 -http_request_size_bytes{handler="flags",quantile="0.5"} NaN -http_request_size_bytes{handler="flags",quantile="0.9"} NaN -http_request_size_bytes{handler="flags",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="flags"} 0 -http_request_size_bytes_count{handler="flags"} 0 -http_request_size_bytes{handler="graph",quantile="0.5"} 367 -http_request_size_bytes{handler="graph",quantile="0.9"} 389 -http_request_size_bytes{handler="graph",quantile="0.99"} 389 -http_request_size_bytes_sum{handler="graph"} 1145 -http_request_size_bytes_count{handler="graph"} 3 -http_request_size_bytes{handler="heap",quantile="0.5"} NaN -http_request_size_bytes{handler="heap",quantile="0.9"} NaN -http_request_size_bytes{handler="heap",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="heap"} 0 -http_request_size_bytes_count{handler="heap"} 0 -http_request_size_bytes{handler="label_values",quantile="0.5"} 416 -http_request_size_bytes{handler="label_values",quantile="0.9"} 416 -http_request_size_bytes{handler="label_values",quantile="0.99"} 416 -http_request_size_bytes_sum{handler="label_values"} 1248 -http_request_size_bytes_count{handler="label_values"} 3 
-http_request_size_bytes{handler="options",quantile="0.5"} NaN -http_request_size_bytes{handler="options",quantile="0.9"} NaN -http_request_size_bytes{handler="options",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="options"} 0 -http_request_size_bytes_count{handler="options"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 -http_request_size_bytes_sum{handler="prometheus"} 109956 -http_request_size_bytes_count{handler="prometheus"} 462 -http_request_size_bytes{handler="query",quantile="0.5"} 531 -http_request_size_bytes{handler="query",quantile="0.9"} 531 -http_request_size_bytes{handler="query",quantile="0.99"} 531 -http_request_size_bytes_sum{handler="query"} 3186 -http_request_size_bytes_count{handler="query"} 6 -http_request_size_bytes{handler="query_range",quantile="0.5"} NaN -http_request_size_bytes{handler="query_range",quantile="0.9"} NaN -http_request_size_bytes{handler="query_range",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="query_range"} 0 -http_request_size_bytes_count{handler="query_range"} 0 -http_request_size_bytes{handler="rules",quantile="0.5"} NaN -http_request_size_bytes{handler="rules",quantile="0.9"} NaN -http_request_size_bytes{handler="rules",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="rules"} 0 -http_request_size_bytes_count{handler="rules"} 0 -http_request_size_bytes{handler="series",quantile="0.5"} NaN -http_request_size_bytes{handler="series",quantile="0.9"} NaN -http_request_size_bytes{handler="series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="series"} 0 -http_request_size_bytes_count{handler="series"} 0 -http_request_size_bytes{handler="static",quantile="0.5"} 379 -http_request_size_bytes{handler="static",quantile="0.9"} 379 -http_request_size_bytes{handler="static",quantile="0.99"} 379 -http_request_size_bytes_sum{handler="static"} 1137 -http_request_size_bytes_count{handler="static"} 3 -http_request_size_bytes{handler="status",quantile="0.5"} NaN -http_request_size_bytes{handler="status",quantile="0.9"} NaN -http_request_size_bytes{handler="status",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="status"} 0 -http_request_size_bytes_count{handler="status"} 0 -http_request_size_bytes{handler="targets",quantile="0.5"} NaN -http_request_size_bytes{handler="targets",quantile="0.9"} NaN -http_request_size_bytes{handler="targets",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="targets"} 0 -http_request_size_bytes_count{handler="targets"} 0 -http_request_size_bytes{handler="version",quantile="0.5"} NaN -http_request_size_bytes{handler="version",quantile="0.9"} NaN -http_request_size_bytes{handler="version",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="version"} 0 -http_request_size_bytes_count{handler="version"} 0 -# HELP http_requests_total Total number of HTTP requests made. -# TYPE http_requests_total counter -http_requests_total{code="200",handler="graph",method="get"} 3 -http_requests_total{code="200",handler="label_values",method="get"} 3 -http_requests_total{code="200",handler="prometheus",method="get"} 462 -http_requests_total{code="200",handler="query",method="get"} 6 -http_requests_total{code="200",handler="static",method="get"} 3 -# HELP http_response_size_bytes The HTTP response sizes in bytes. 
-# TYPE http_response_size_bytes summary -http_response_size_bytes{handler="alerts",quantile="0.5"} NaN -http_response_size_bytes{handler="alerts",quantile="0.9"} NaN -http_response_size_bytes{handler="alerts",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="alerts"} 0 -http_response_size_bytes_count{handler="alerts"} 0 -http_response_size_bytes{handler="config",quantile="0.5"} NaN -http_response_size_bytes{handler="config",quantile="0.9"} NaN -http_response_size_bytes{handler="config",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="config"} 0 -http_response_size_bytes_count{handler="config"} 0 -http_response_size_bytes{handler="consoles",quantile="0.5"} NaN -http_response_size_bytes{handler="consoles",quantile="0.9"} NaN -http_response_size_bytes{handler="consoles",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="consoles"} 0 -http_response_size_bytes_count{handler="consoles"} 0 -http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="drop_series"} 0 -http_response_size_bytes_count{handler="drop_series"} 0 -http_response_size_bytes{handler="federate",quantile="0.5"} NaN -http_response_size_bytes{handler="federate",quantile="0.9"} NaN -http_response_size_bytes{handler="federate",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="federate"} 0 -http_response_size_bytes_count{handler="federate"} 0 -http_response_size_bytes{handler="flags",quantile="0.5"} NaN -http_response_size_bytes{handler="flags",quantile="0.9"} NaN -http_response_size_bytes{handler="flags",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="flags"} 0 -http_response_size_bytes_count{handler="flags"} 0 -http_response_size_bytes{handler="graph",quantile="0.5"} 3619 -http_response_size_bytes{handler="graph",quantile="0.9"} 3619 -http_response_size_bytes{handler="graph",quantile="0.99"} 3619 -http_response_size_bytes_sum{handler="graph"} 10857 -http_response_size_bytes_count{handler="graph"} 3 -http_response_size_bytes{handler="heap",quantile="0.5"} NaN -http_response_size_bytes{handler="heap",quantile="0.9"} NaN -http_response_size_bytes{handler="heap",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="heap"} 0 -http_response_size_bytes_count{handler="heap"} 0 -http_response_size_bytes{handler="label_values",quantile="0.5"} 642 -http_response_size_bytes{handler="label_values",quantile="0.9"} 642 -http_response_size_bytes{handler="label_values",quantile="0.99"} 642 -http_response_size_bytes_sum{handler="label_values"} 1926 -http_response_size_bytes_count{handler="label_values"} 3 -http_response_size_bytes{handler="options",quantile="0.5"} NaN -http_response_size_bytes{handler="options",quantile="0.9"} NaN -http_response_size_bytes{handler="options",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="options"} 0 -http_response_size_bytes_count{handler="options"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 -http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 -http_response_size_bytes_count{handler="prometheus"} 462 -http_response_size_bytes{handler="query",quantile="0.5"} 776 -http_response_size_bytes{handler="query",quantile="0.9"} 781 -http_response_size_bytes{handler="query",quantile="0.99"} 781 
-http_response_size_bytes_sum{handler="query"} 4656 -http_response_size_bytes_count{handler="query"} 6 -http_response_size_bytes{handler="query_range",quantile="0.5"} NaN -http_response_size_bytes{handler="query_range",quantile="0.9"} NaN -http_response_size_bytes{handler="query_range",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="query_range"} 0 -http_response_size_bytes_count{handler="query_range"} 0 -http_response_size_bytes{handler="rules",quantile="0.5"} NaN -http_response_size_bytes{handler="rules",quantile="0.9"} NaN -http_response_size_bytes{handler="rules",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="rules"} 0 -http_response_size_bytes_count{handler="rules"} 0 -http_response_size_bytes{handler="series",quantile="0.5"} NaN -http_response_size_bytes{handler="series",quantile="0.9"} NaN -http_response_size_bytes{handler="series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="series"} 0 -http_response_size_bytes_count{handler="series"} 0 -http_response_size_bytes{handler="static",quantile="0.5"} 6316 -http_response_size_bytes{handler="static",quantile="0.9"} 6316 -http_response_size_bytes{handler="static",quantile="0.99"} 6316 -http_response_size_bytes_sum{handler="static"} 18948 -http_response_size_bytes_count{handler="static"} 3 -http_response_size_bytes{handler="status",quantile="0.5"} NaN -http_response_size_bytes{handler="status",quantile="0.9"} NaN -http_response_size_bytes{handler="status",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="status"} 0 -http_response_size_bytes_count{handler="status"} 0 -http_response_size_bytes{handler="targets",quantile="0.5"} NaN -http_response_size_bytes{handler="targets",quantile="0.9"} NaN -http_response_size_bytes{handler="targets",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="targets"} 0 -http_response_size_bytes_count{handler="targets"} 0 -http_response_size_bytes{handler="version",quantile="0.5"} NaN -http_response_size_bytes{handler="version",quantile="0.9"} NaN -http_response_size_bytes{handler="version",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="version"} 0 -http_response_size_bytes_count{handler="version"} 0 -# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built. -# TYPE prometheus_build_info gauge -prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 -# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. -# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge -prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 -# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. -# TYPE prometheus_config_last_reload_successful gauge -prometheus_config_last_reload_successful 1 -# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations. 
-# TYPE prometheus_evaluator_duration_seconds summary -prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_count 1 -# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage. -# TYPE prometheus_evaluator_iterations_skipped_total counter -prometheus_evaluator_iterations_skipped_total 0 -# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration. -# TYPE prometheus_notifications_dropped_total counter -prometheus_notifications_dropped_total 0 -# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. -# TYPE prometheus_notifications_queue_capacity gauge -prometheus_notifications_queue_capacity 10000 -# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. -# TYPE prometheus_notifications_queue_length gauge -prometheus_notifications_queue_length 0 -# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. -# TYPE prometheus_rule_evaluation_failures_total counter -prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 -prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 -# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds. -# TYPE prometheus_sd_azure_refresh_duration_seconds summary -prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_azure_refresh_duration_seconds_sum 0 -prometheus_sd_azure_refresh_duration_seconds_count 0 -# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures. -# TYPE prometheus_sd_azure_refresh_failures_total counter -prometheus_sd_azure_refresh_failures_total 0 -# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. -# TYPE prometheus_sd_consul_rpc_duration_seconds summary -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 -# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. 
-# TYPE prometheus_sd_consul_rpc_failures_total counter -prometheus_sd_consul_rpc_failures_total 0 -# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. -# TYPE prometheus_sd_dns_lookup_failures_total counter -prometheus_sd_dns_lookup_failures_total 0 -# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. -# TYPE prometheus_sd_dns_lookups_total counter -prometheus_sd_dns_lookups_total 0 -# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds. -# TYPE prometheus_sd_ec2_refresh_duration_seconds summary -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_ec2_refresh_duration_seconds_sum 0 -prometheus_sd_ec2_refresh_duration_seconds_count 0 -# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures. -# TYPE prometheus_sd_ec2_refresh_failures_total counter -prometheus_sd_ec2_refresh_failures_total 0 -# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. -# TYPE prometheus_sd_file_read_errors_total counter -prometheus_sd_file_read_errors_total 0 -# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. -# TYPE prometheus_sd_file_scan_duration_seconds summary -prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN -prometheus_sd_file_scan_duration_seconds_sum 0 -prometheus_sd_file_scan_duration_seconds_count 0 -# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds. -# TYPE prometheus_sd_gce_refresh_duration summary -prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN -prometheus_sd_gce_refresh_duration_sum 0 -prometheus_sd_gce_refresh_duration_count 0 -# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures. -# TYPE prometheus_sd_gce_refresh_failures_total counter -prometheus_sd_gce_refresh_failures_total 0 -# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. -# TYPE prometheus_sd_kubernetes_events_total counter -prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 -# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds. 
-# TYPE prometheus_sd_marathon_refresh_duration_seconds summary -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_marathon_refresh_duration_seconds_sum 0 -prometheus_sd_marathon_refresh_duration_seconds_count 0 -# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures. -# TYPE prometheus_sd_marathon_refresh_failures_total counter -prometheus_sd_marathon_refresh_failures_total 0 -# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. -# TYPE prometheus_target_interval_length_seconds summary -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 -prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 -prometheus_target_interval_length_seconds_count{interval="50ms"} 685 -# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. -# TYPE prometheus_target_scrape_pool_sync_total counter -prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 -# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled. -# TYPE prometheus_target_skipped_scrapes_total counter -prometheus_target_skipped_scrapes_total 0 -# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. -# TYPE prometheus_target_sync_length_seconds summary -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 -# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. -# TYPE prometheus_treecache_watcher_goroutines gauge -prometheus_treecache_watcher_goroutines 0 -# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. 
-# TYPE prometheus_treecache_zookeeper_failures_total counter
-prometheus_treecache_zookeeper_failures_total 0
-# EOF
diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go
index ea3a2e1a34..2ca6c03af7 100644
--- a/model/textparse/protobufparse.go
+++ b/model/textparse/protobufparse.go
@@ -15,43 +15,52 @@ package textparse
 import (
 	"bytes"
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
 	"math"
+	"strconv"
 	"strings"
+	"sync"
 	"unicode/utf8"
 
-	"github.com/gogo/protobuf/proto"
 	"github.com/gogo/protobuf/types"
 
 	"github.com/prometheus/common/model"
 
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
+	"github.com/prometheus/prometheus/schema"
 )
 
-// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
-// protobuf format and then present it as it if were parsed by a
-// Prometheus-2-style text parser. This is only done so that we can easily plug
-// in the protobuf format into Prometheus 2. For future use (with the final
-// format that will be used for native histograms), we have to revisit the
-// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing
-// could be used in a similar fashion (byte-slice pointers into the raw
-// payload), which requires some hand-coded protobuf handling. But the current
-// parsers all expect the full series name (metric name plus label pairs) as one
-// string, which is not how things are represented in the protobuf format. If
-// the re-arrangement work is actually causing problems (which has to be seen),
-// that expectation needs to be changed.
+// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
+var floatFormatBufPool = sync.Pool{
+	New: func() interface{} {
+		// To contain at most 17 digits and additional syntax for a float64.
+		b := make([]byte, 0, 24)
+		return &b
+	},
+}
+
+// ProtobufParser parses the old Prometheus protobuf format and presents it
+// as the text-style textparse.Parser interface.
+//
+// It uses a tailored streaming protobuf dto.MetricStreamingDecoder that
+// reuses internal protobuf structs and allows direct unmarshalling to
+// Prometheus types like labels.
 type ProtobufParser struct {
-	in        []byte // The intput to parse.
-	inPos     int    // Position within the input.
-	metricPos int    // Position within Metric slice.
+	dec *dto.MetricStreamingDecoder
+
+	// Used for both the string returned by Series and Histogram, as well as
+	// the metric family for Type, Unit and Help.
+	entryBytes *bytes.Buffer
+
+	lset    labels.Labels
+	builder labels.ScratchBuilder // Held here to reduce allocations when building Labels.
+
 	// fieldPos is the position within a Summary or (legacy) Histogram. -2
-	// is the count. -1 is the sum. Otherwise it is the index within
+	// is the count. -1 is the sum. Otherwise, it is the index within
 	// quantiles/buckets.
 	fieldPos   int
 	fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
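The floatFormatBufPool added above is the standard pooled scratch-buffer idiom: callers Get a *[]byte, re-slice it to length zero, append into it, and Put it back, so one backing array is recycled across calls instead of being reallocated per formatted float. A minimal, self-contained sketch of the same idiom (formatFloat is an illustrative name, not the parser's actual function):

```go
package main

import (
	"fmt"
	"strconv"
	"sync"
)

// bufPool mirrors floatFormatBufPool: a *[]byte is pooled (a pointer, so
// Put does not allocate), sized for a float64's worst-case rendering.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 24)
		return &b
	},
}

// formatFloat borrows a buffer, appends into it starting at length zero,
// copies the bytes out as a string, and returns the buffer for reuse.
func formatFloat(f float64) string {
	bp := bufPool.Get().(*[]byte)
	defer bufPool.Put(bp)
	*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
	// string(...) copies, so the pooled array may be overwritten later.
	return string(*bp)
}

func main() {
	fmt.Println(formatFloat(0.050381782))
	fmt.Println(formatFloat(1.374097e+06))
}
```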
@@ -64,30 +73,25 @@ type ProtobufParser struct {
 	exemplarReturned bool
 
 	// state is marked by the entry we are processing. EntryInvalid implies
-	// that we have to decode the next MetricFamily.
+	// that we have to decode the next MetricDescriptor.
 	state Entry
 
-	builder labels.ScratchBuilder // held here to reduce allocations when building Labels
-
-	mf *dto.MetricFamily
-
-	// Wether to also parse a classic histogram that is also present as a
+	// Whether to also parse a classic histogram that is also present as a
 	// native histogram.
-	parseClassicHistograms bool
-
-	// The following are just shenanigans to satisfy the Parser interface.
-	metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
+	parseClassicHistograms  bool
+	enableTypeAndUnitLabels bool
 }
 
 // NewProtobufParser returns a parser for the payload in the byte slice.
-func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) Parser {
+func NewProtobufParser(b []byte, parseClassicHistograms, enableTypeAndUnitLabels bool, st *labels.SymbolTable) Parser {
 	return &ProtobufParser{
-		in:                     b,
-		state:                  EntryInvalid,
-		mf:                     &dto.MetricFamily{},
-		metricBytes:            &bytes.Buffer{},
-		parseClassicHistograms: parseClassicHistograms,
-		builder:                labels.NewScratchBuilderWithSymbolTable(st, 16),
+		dec:        dto.NewMetricStreamingDecoder(b),
+		entryBytes: &bytes.Buffer{},
+		builder:    labels.NewScratchBuilderWithSymbolTable(st, 16), // TODO(bwplotka): Try base builder.
+
+		state:                   EntryInvalid,
+		parseClassicHistograms:  parseClassicHistograms,
+		enableTypeAndUnitLabels: enableTypeAndUnitLabels,
 	}
 }
 
@@ -95,19 +99,18 @@ func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolT
 // value, the timestamp if set, and the value of the current sample.
 func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
 	var (
-		m  = p.mf.GetMetric()[p.metricPos]
-		ts = m.GetTimestampMs()
+		ts = &p.dec.TimestampMs // To save memory allocations, never nil.
 		v  float64
 	)
-	switch p.mf.GetType() {
+	switch p.dec.GetType() {
 	case dto.MetricType_COUNTER:
-		v = m.GetCounter().GetValue()
+		v = p.dec.GetCounter().GetValue()
 	case dto.MetricType_GAUGE:
-		v = m.GetGauge().GetValue()
+		v = p.dec.GetGauge().GetValue()
 	case dto.MetricType_UNTYPED:
-		v = m.GetUntyped().GetValue()
+		v = p.dec.GetUntyped().GetValue()
 	case dto.MetricType_SUMMARY:
-		s := m.GetSummary()
+		s := p.dec.GetSummary()
 		switch p.fieldPos {
 		case -2:
 			v = float64(s.GetSampleCount())
@@ -122,7 +125,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
 		}
 	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
 		// This should only happen for a classic histogram.
-		h := m.GetHistogram()
+		h := p.dec.GetHistogram()
 		switch p.fieldPos {
 		case -2:
 			v = h.GetSampleCountFloat()
@@ -148,8 +151,8 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
 	default:
 		panic("encountered unexpected metric type, this is a bug")
 	}
-	if ts != 0 {
-		return p.metricBytes.Bytes(), &ts, v
+	if *ts != 0 {
+		return p.entryBytes.Bytes(), ts, v
 	}
 	// TODO(beorn7): We assume here that ts==0 means no timestamp. That's
 	// not true in general, but proto3 originally has no distinction between
@@ -160,7 +163,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
 	// away from gogo-protobuf to an actively maintained protobuf
 	// implementation. Once that's done, we can simply use the `optional`
 	// keyword and check for the unset state explicitly.
-	return p.metricBytes.Bytes(), nil, v
+	return p.entryBytes.Bytes(), nil, v
}
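Taken together with Next() further down, Series() is consumed in the usual textparse loop: call Next() until io.EOF and switch on the entry kind. A rough usage sketch against the signature introduced in this diff (the payload is assumed to be a valid varint-delimited protobuf scrape body, and labels.NewSymbolTable() is assumed available as in the rest of the codebase):

```go
package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

// decodeAll drives the parser the way the scrape loop does.
func decodeAll(buf []byte) error {
	p := textparse.NewProtobufParser(buf, false, false, labels.NewSymbolTable())
	for {
		entry, err := p.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		switch entry {
		case textparse.EntryHelp:
			name, help := p.Help()
			fmt.Printf("# HELP %s %s\n", name, help)
		case textparse.EntryType:
			name, typ := p.Type()
			fmt.Printf("# TYPE %s %s\n", name, typ)
		case textparse.EntrySeries:
			series, ts, v := p.Series()
			fmt.Printf("%q ts=%v value=%v\n", series, ts, v)
		case textparse.EntryHistogram:
			series, ts, h, fh := p.Histogram()
			fmt.Printf("%q ts=%v int=%v float=%v\n", series, ts, h, fh)
		}
	}
}

func main() {
	_ = decodeAll(nil) // empty payload: Next() reports io.EOF immediately
}
```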
 
 // Histogram returns the bytes of a series with a native histogram as a value,
@@ -175,47 +178,56 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
 // value.
 func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
 	var (
-		m  = p.mf.GetMetric()[p.metricPos]
-		ts = m.GetTimestampMs()
-		h  = m.GetHistogram()
+		ts = &p.dec.TimestampMs // To save memory allocations, never nil.
+		h  = p.dec.GetHistogram()
 	)
+
 	if p.parseClassicHistograms && len(h.GetBucket()) > 0 {
 		p.redoClassic = true
 	}
 	if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 {
 		// It is a float histogram.
 		fh := histogram.FloatHistogram{
-			Count:         h.GetSampleCountFloat(),
-			Sum:           h.GetSampleSum(),
-			ZeroThreshold: h.GetZeroThreshold(),
-			ZeroCount:     h.GetZeroCountFloat(),
-			Schema:        h.GetSchema(),
+			Count:         h.GetSampleCountFloat(),
+			Sum:           h.GetSampleSum(),
+			ZeroThreshold: h.GetZeroThreshold(),
+			ZeroCount:     h.GetZeroCountFloat(),
+			Schema:        h.GetSchema(),
+
+			// Decoder reuses slices, so we need to copy.
 			PositiveSpans:   make([]histogram.Span, len(h.GetPositiveSpan())),
-			PositiveBuckets: h.GetPositiveCount(),
+			PositiveBuckets: make([]float64, len(h.GetPositiveCount())),
 			NegativeSpans:   make([]histogram.Span, len(h.GetNegativeSpan())),
-			NegativeBuckets: h.GetNegativeCount(),
+			NegativeBuckets: make([]float64, len(h.GetNegativeCount())),
 		}
 		for i, span := range h.GetPositiveSpan() {
 			fh.PositiveSpans[i].Offset = span.GetOffset()
 			fh.PositiveSpans[i].Length = span.GetLength()
 		}
+		for i, cnt := range h.GetPositiveCount() {
+			fh.PositiveBuckets[i] = cnt
+		}
 		for i, span := range h.GetNegativeSpan() {
 			fh.NegativeSpans[i].Offset = span.GetOffset()
 			fh.NegativeSpans[i].Length = span.GetLength()
 		}
-		if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
+		for i, cnt := range h.GetNegativeCount() {
+			fh.NegativeBuckets[i] = cnt
+		}
+		if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
 			fh.CounterResetHint = histogram.GaugeType
 		}
 		fh.Compact(0)
-		if ts != 0 {
-			return p.metricBytes.Bytes(), &ts, nil, &fh
+		if *ts != 0 {
+			return p.entryBytes.Bytes(), ts, nil, &fh
 		}
 		// Nasty hack: Assume that ts==0 means no timestamp. That's not true in
 		// general, but proto3 has no distinction between unset and
 		// default. Need to avoid in the final format.
-		return p.metricBytes.Bytes(), nil, nil, &fh
+		return p.entryBytes.Bytes(), nil, nil, &fh
 	}
+
+	// TODO(bwplotka): Create sync.Pool for those structs.
 	sh := histogram.Histogram{
 		Count:         h.GetSampleCount(),
 		Sum:           h.GetSampleSum(),
@@ -223,41 +235,47 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
 		ZeroCount:     h.GetZeroCount(),
 		Schema:        h.GetSchema(),
 		PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())),
-		PositiveBuckets: h.GetPositiveDelta(),
+		PositiveBuckets: make([]int64, len(h.GetPositiveDelta())),
 		NegativeSpans:   make([]histogram.Span, len(h.GetNegativeSpan())),
-		NegativeBuckets: h.GetNegativeDelta(),
+		NegativeBuckets: make([]int64, len(h.GetNegativeDelta())),
 	}
 	for i, span := range h.GetPositiveSpan() {
 		sh.PositiveSpans[i].Offset = span.GetOffset()
 		sh.PositiveSpans[i].Length = span.GetLength()
 	}
+	for i, cnt := range h.GetPositiveDelta() {
+		sh.PositiveBuckets[i] = cnt
+	}
 	for i, span := range h.GetNegativeSpan() {
 		sh.NegativeSpans[i].Offset = span.GetOffset()
 		sh.NegativeSpans[i].Length = span.GetLength()
 	}
-	if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
+	for i, cnt := range h.GetNegativeDelta() {
+		sh.NegativeBuckets[i] = cnt
+	}
+	if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
 		sh.CounterResetHint = histogram.GaugeType
 	}
 	sh.Compact(0)
-	if ts != 0 {
-		return p.metricBytes.Bytes(), &ts, &sh, nil
+	if *ts != 0 {
		return p.entryBytes.Bytes(), ts, &sh, nil
 	}
-	return p.metricBytes.Bytes(), nil, &sh, nil
+	return p.entryBytes.Bytes(), nil, &sh, nil
 }
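The new make-and-copy of bucket slices is what makes this safe: dto.MetricStreamingDecoder reuses its internal message between NextMetric calls, so a slice handed out by the getters is only valid until the next series is decoded. A tiny sketch of the aliasing bug the copies prevent, in generic Go rather than the parser's types:

```go
package main

import "fmt"

// scratch stands in for the decoder's internal buffer, rewritten on
// every "decode" call.
var scratch = make([]float64, 3)

func decodeNext(vals ...float64) []float64 {
	copy(scratch, vals)
	return scratch // aliases the shared buffer
}

func main() {
	aliased := decodeNext(1, 2, 3)

	owned := make([]float64, len(aliased))
	copy(owned, aliased) // what Histogram() now does for bucket slices

	decodeNext(7, 8, 9) // the next series overwrites the shared buffer

	fmt.Println(aliased) // [7 8 9], silently corrupted
	fmt.Println(owned)   // [1 2 3], safe
}
```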
 
 // Help returns the metric name and help text in the current entry.
 // Must only be called after Next returned a help entry.
 // The returned byte slices become invalid after the next call to Next.
 func (p *ProtobufParser) Help() ([]byte, []byte) {
-	return p.metricBytes.Bytes(), []byte(p.mf.GetHelp())
+	return p.entryBytes.Bytes(), yoloBytes(p.dec.GetHelp())
 }
 
 // Type returns the metric name and type in the current entry.
 // Must only be called after Next returned a type entry.
 // The returned byte slices become invalid after the next call to Next.
 func (p *ProtobufParser) Type() ([]byte, model.MetricType) {
-	n := p.metricBytes.Bytes()
-	switch p.mf.GetType() {
+	n := p.entryBytes.Bytes()
+	switch p.dec.GetType() {
 	case dto.MetricType_COUNTER:
 		return n, model.MetricTypeCounter
 	case dto.MetricType_GAUGE:
@@ -276,7 +294,7 @@ func (p *ProtobufParser) Type() ([]byte, model.MetricType) {
 // Must only be called after Next returned a unit entry.
 // The returned byte slices become invalid after the next call to Next.
 func (p *ProtobufParser) Unit() ([]byte, []byte) {
-	return p.metricBytes.Bytes(), []byte(p.mf.GetUnit())
+	return p.entryBytes.Bytes(), []byte(p.dec.GetUnit())
 }
 
 // Comment always returns nil because comments aren't supported by the protobuf
@@ -285,24 +303,9 @@ func (p *ProtobufParser) Comment() []byte {
 	return nil
 }
 
-// Metric writes the labels of the current sample into the passed labels.
-// It returns the string from which the metric was parsed.
-func (p *ProtobufParser) Metric(l *labels.Labels) string {
-	p.builder.Reset()
-	p.builder.Add(labels.MetricName, p.getMagicName())
-
-	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
-		p.builder.Add(lp.GetName(), lp.GetValue())
-	}
-	if needed, name, value := p.getMagicLabel(); needed {
-		p.builder.Add(name, value)
-	}
-
-	// Sort labels to maintain the sorted labels invariant.
-	p.builder.Sort()
-	*l = p.builder.Labels()
-
-	return p.metricBytes.String()
+// Labels writes the labels of the current sample into the passed labels.
+func (p *ProtobufParser) Labels(l *labels.Labels) {
+	*l = p.lset.Copy()
 }
 
 // Exemplar writes the exemplar of the current sample into the passed
@@ -315,15 +318,14 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
 		// We only ever return one exemplar per (non-native-histogram) series.
 		return false
 	}
-	m := p.mf.GetMetric()[p.metricPos]
 	var exProto *dto.Exemplar
-	switch p.mf.GetType() {
+	switch p.dec.GetType() {
 	case dto.MetricType_COUNTER:
-		exProto = m.GetCounter().GetExemplar()
+		exProto = p.dec.GetCounter().GetExemplar()
 	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
 		isClassic := p.state == EntrySeries
-		if !isClassic && len(m.GetHistogram().GetExemplars()) > 0 {
-			exs := m.GetHistogram().GetExemplars()
+		if !isClassic && len(p.dec.GetHistogram().GetExemplars()) > 0 {
+			exs := p.dec.GetHistogram().GetExemplars()
 			for p.exemplarPos < len(exs) {
 				exProto = exs[p.exemplarPos]
 				p.exemplarPos++
@@ -335,7 +337,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
 				return false
 			}
 		} else {
-			bb := m.GetHistogram().GetBucket()
+			bb := p.dec.GetHistogram().GetBucket()
 			if p.fieldPos < 0 {
 				if isClassic {
 					return false // At _count or _sum.
@@ -379,26 +381,24 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
 	return true
 }
 
-// CreatedTimestamp returns CT or nil if CT is not present or
-// invalid (as timestamp e.g. negative value) on counters, summaries or histograms.
-func (p *ProtobufParser) CreatedTimestamp() *int64 {
+// CreatedTimestamp returns CT or 0 if CT is not present on counters, summaries or histograms.
+func (p *ProtobufParser) CreatedTimestamp() int64 {
 	var ct *types.Timestamp
-	switch p.mf.GetType() {
+	switch p.dec.GetType() {
 	case dto.MetricType_COUNTER:
-		ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp()
+		ct = p.dec.GetCounter().GetCreatedTimestamp()
 	case dto.MetricType_SUMMARY:
-		ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp()
+		ct = p.dec.GetSummary().GetCreatedTimestamp()
 	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
-		ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp()
+		ct = p.dec.GetHistogram().GetCreatedTimestamp()
 	default:
 	}
-	ctAsTime, err := types.TimestampFromProto(ct)
-	if err != nil {
-		// Errors means ct == nil or invalid timestamp, which we silently ignore.
-		return nil
+	if ct == nil {
+		return 0
 	}
-	ctMilis := ctAsTime.UnixMilli()
-	return &ctMilis
+	// Same as the gogo proto types.TimestampFromProto, but straight to an
+	// integer and without validation.
+	return ct.GetSeconds()*1e3 + int64(ct.GetNanos())/1e6
 }
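The returned millisecond value is now computed directly from the protobuf (seconds, nanos) pair instead of going through types.TimestampFromProto and a time.Time. For non-negative timestamps the two routes agree, as a quick check shows:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A protobuf Timestamp carries (seconds since epoch, nanoseconds).
	var secs, nanos int64 = 1484395547, 750_000_000

	// The direct computation used above; integer division truncates,
	// which matches UnixMilli for non-negative timestamps.
	direct := secs*1e3 + nanos/1e6

	// The old route via a time.Time boils down to this.
	viaTime := time.Unix(secs, nanos).UnixMilli()

	fmt.Println(direct, viaTime) // 1484395547750 1484395547750
}
```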
 
 // Next advances the parser to the next "sample" (emulating the behavior of a
@@ -407,30 +407,34 @@ func (p *ProtobufParser) CreatedTimestamp() *int64 {
 func (p *ProtobufParser) Next() (Entry, error) {
 	p.exemplarReturned = false
 	switch p.state {
+	// Invalid state occurs on:
+	// * First Next() call.
+	// * Recursive call that tells Next to move to the next metric family.
 	case EntryInvalid:
-		p.metricPos = 0
+		p.exemplarPos = 0
 		p.fieldPos = -2
-		n, err := readDelimited(p.in[p.inPos:], p.mf)
-		p.inPos += n
-		if err != nil {
+
+		if err := p.dec.NextMetricFamily(); err != nil {
 			return p.state, err
 		}
-
-		// Skip empty metric families.
-		if len(p.mf.GetMetric()) == 0 {
-			return p.Next()
+		if err := p.dec.NextMetric(); err != nil {
+			// Skip empty metric families.
+			if errors.Is(err, io.EOF) {
+				return p.Next()
+			}
+			return EntryInvalid, err
 		}
 
 		// We are at the beginning of a metric family. Put only the name
-		// into metricBytes and validate only name, help, and type for now.
-		name := p.mf.GetName()
+		// into entryBytes and validate only name, help, and type for now.
+		name := p.dec.GetName()
 		if !model.IsValidMetricName(model.LabelValue(name)) {
 			return EntryInvalid, fmt.Errorf("invalid metric name: %s", name)
 		}
-		if help := p.mf.GetHelp(); !utf8.ValidString(help) {
+		if help := p.dec.GetHelp(); !utf8.ValidString(help) {
 			return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help)
 		}
-		switch p.mf.GetType() {
+		switch p.dec.GetType() {
 		case dto.MetricType_COUNTER,
 			dto.MetricType_GAUGE,
 			dto.MetricType_HISTOGRAM,
@@ -439,11 +443,11 @@ func (p *ProtobufParser) Next() (Entry, error) {
 			dto.MetricType_UNTYPED:
 			// All good.
 		default:
-			return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType())
+			return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.dec.GetType())
 		}
-		unit := p.mf.GetUnit()
+		unit := p.dec.GetUnit()
 		if len(unit) > 0 {
-			if p.mf.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+			if p.dec.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
 				if !strings.HasSuffix(name[:len(name)-6], unit) || len(name)-6 < len(unit)+1 || name[len(name)-6-len(unit)-1] != '_' {
 					return EntryInvalid, fmt.Errorf("unit %q not a suffix of counter %q", unit, name)
 				}
@@ -451,54 +455,90 @@ func (p *ProtobufParser) Next() (Entry, error) {
 				return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", unit, name)
 			}
 		}
-		p.metricBytes.Reset()
-		p.metricBytes.WriteString(name)
-
+		p.entryBytes.Reset()
+		p.entryBytes.WriteString(name)
 		p.state = EntryHelp
 	case EntryHelp:
+		if p.dec.Unit != "" {
+			p.state = EntryUnit
+		} else {
+			p.state = EntryType
+		}
+	case EntryUnit:
 		p.state = EntryType
 	case EntryType:
-		t := p.mf.GetType()
+		t := p.dec.GetType()
 		if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
-			isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+			isNativeHistogram(p.dec.GetHistogram()) {
 			p.state = EntryHistogram
 		} else {
 			p.state = EntrySeries
 		}
-		if err := p.updateMetricBytes(); err != nil {
+		if err := p.onSeriesOrHistogramUpdate(); err != nil {
 			return EntryInvalid, err
 		}
-	case EntryHistogram, EntrySeries:
-		if p.redoClassic {
-			p.redoClassic = false
-			p.state = EntrySeries
-			p.fieldPos = -3
-			p.fieldsDone = false
-		}
-		t := p.mf.GetType()
-		if p.state == EntrySeries && !p.fieldsDone &&
-			(t == dto.MetricType_SUMMARY ||
-				t == dto.MetricType_HISTOGRAM ||
-				t == dto.MetricType_GAUGE_HISTOGRAM) {
-			p.fieldPos++
-		} else {
-			p.metricPos++
+	case EntrySeries:
+		// Potentially a second series in the metric family.
+		t := p.dec.GetType()
+		if t == dto.MetricType_SUMMARY ||
+			t == dto.MetricType_HISTOGRAM ||
+			t == dto.MetricType_GAUGE_HISTOGRAM {
+			// Non-trivial series (complex metrics, with magic suffixes).
+
+			// Did we iterate over all the classic representation fields?
+			// NOTE: p.fieldsDone is updated on p.onSeriesOrHistogramUpdate.
+			if !p.fieldsDone {
+				// Still some fields to iterate over.
+				p.fieldPos++
+				if err := p.onSeriesOrHistogramUpdate(); err != nil {
+					return EntryInvalid, err
+				}
+				return p.state, nil
+			}
+
+			// Reset histogram fields.
 			p.fieldPos = -2
 			p.fieldsDone = false
+			p.exemplarPos = 0
+
+			// If this is a metric family containing native
-			// histograms, we have to switch back to native
-			// histograms after parsing a classic histogram.
-			if p.state == EntrySeries &&
-				(t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
-				isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+			// histograms, it means we are here thanks to the redoClassic state.
+			// Return to native histograms for the consistent flow.
+			if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
+				isNativeHistogram(p.dec.GetHistogram()) {
 				p.state = EntryHistogram
 			}
 		}
-		if p.metricPos >= len(p.mf.GetMetric()) {
-			p.state = EntryInvalid
-			return p.Next()
+		// Is there another series?
+		if err := p.dec.NextMetric(); err != nil {
+			if errors.Is(err, io.EOF) {
+				p.state = EntryInvalid
+				return p.Next()
+			}
+			return EntryInvalid, err
 		}
-		if err := p.updateMetricBytes(); err != nil {
+		if err := p.onSeriesOrHistogramUpdate(); err != nil {
+			return EntryInvalid, err
+		}
+	case EntryHistogram:
+		// Was Histogram() called and parseClassicHistograms is true?
+		if p.redoClassic {
+			p.redoClassic = false
+			p.fieldPos = -3
+			p.fieldsDone = false
+			p.state = EntrySeries
+			return p.Next() // Switch to classic histogram.
+		}
+
+		// Is there another series?
+		if err := p.dec.NextMetric(); err != nil {
+			if errors.Is(err, io.EOF) {
+				p.state = EntryInvalid
+				return p.Next()
+			}
+			return EntryInvalid, err
+		}
+		if err := p.onSeriesOrHistogramUpdate(); err != nil {
 			return EntryInvalid, err
 		}
 	default:
@@ -507,30 +547,56 @@ func (p *ProtobufParser) Next() (Entry, error) {
 	return p.state, nil
 }
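Read as a whole, the switch above defines a fixed per-family emission order: Help first, then Unit when the family carries one, then Type, then the samples, with summaries and classic histograms fanned out through fieldPos into their synthetic _count, _sum, and quantile/bucket series before the decoder moves to the next Metric. Schematically, for a hypothetical summary family rpc_duration_seconds with two quantiles (this sequence is inferred from the switch, not taken from a test):

```
EntryHelp                                           -> Help() for the family
EntryUnit                                           -> only when dec.Unit != ""
EntryType                                           -> SUMMARY
EntrySeries  rpc_duration_seconds_count             -> fieldPos == -2
EntrySeries  rpc_duration_seconds_sum               -> fieldPos == -1
EntrySeries  rpc_duration_seconds{quantile="0.5"}   -> fieldPos == 0
EntrySeries  rpc_duration_seconds{quantile="0.9"}   -> fieldPos == 1, fieldsDone
(next Metric in the family, or EntryInvalid -> next MetricFamily)
```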
@@ -507,30 +547,56 @@
     return p.state, nil
 }
 
-func (p *ProtobufParser) updateMetricBytes() error {
-    b := p.metricBytes
-    b.Reset()
-    b.WriteString(p.getMagicName())
-    for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
-        b.WriteByte(model.SeparatorByte)
-        n := lp.GetName()
-        if !model.LabelName(n).IsValid() {
-            return fmt.Errorf("invalid label name: %s", n)
+// onSeriesOrHistogramUpdate updates internal state before returning
+// a series or histogram. It updates:
+// * p.lset.
+// * p.entryBytes.
+// * p.fieldsDone depending on p.fieldPos.
+func (p *ProtobufParser) onSeriesOrHistogramUpdate() error {
+    p.builder.Reset()
+
+    if p.enableTypeAndUnitLabels {
+        _, typ := p.Type()
+
+        m := schema.Metadata{
+            Name: p.getMagicName(),
+            Type: typ,
+            Unit: p.dec.GetUnit(),
         }
-        b.WriteString(n)
-        b.WriteByte(model.SeparatorByte)
-        v := lp.GetValue()
-        if !utf8.ValidString(v) {
-            return fmt.Errorf("invalid label value: %s", v)
+        m.AddToLabels(&p.builder)
+        if err := p.dec.Label(schema.IgnoreOverriddenMetadataLabelsScratchBuilder{
+            Overwrite:      m,
+            ScratchBuilder: &p.builder,
+        }); err != nil {
+            return err
+        }
+    } else {
+        p.builder.Add(labels.MetricName, p.getMagicName())
+        if err := p.dec.Label(&p.builder); err != nil {
+            return err
         }
-        b.WriteString(v)
     }
-    if needed, n, v := p.getMagicLabel(); needed {
-        b.WriteByte(model.SeparatorByte)
-        b.WriteString(n)
-        b.WriteByte(model.SeparatorByte)
-        b.WriteString(v)
+
+    if needed, name, value := p.getMagicLabel(); needed {
+        p.builder.Add(name, value)
     }
+
+    // Sort labels to maintain the sorted labels invariant.
+    p.builder.Sort()
+    p.builder.Overwrite(&p.lset)
+
+    // entryBytes has to be unique for each series.
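+    // It is the metric name followed by \xff-separated (model.SeparatorByte)
+    // label name/value pairs, which is exactly what the expected strings in
+    // the tests below spell out.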
+    p.entryBytes.Reset()
+    p.lset.Range(func(l labels.Label) {
+        if l.Name == labels.MetricName {
+            p.entryBytes.WriteString(l.Value)
+            return
+        }
+        p.entryBytes.WriteByte(model.SeparatorByte)
+        p.entryBytes.WriteString(l.Name)
+        p.entryBytes.WriteByte(model.SeparatorByte)
+        p.entryBytes.WriteString(l.Value)
+    })
     return nil
 }
 
@@ -538,36 +604,37 @@ func (p *ProtobufParser) updateMetricBytes() error {
 // ("_count", "_sum", "_bucket") if needed according to the current parser
 // state.
 func (p *ProtobufParser) getMagicName() string {
-    t := p.mf.GetType()
+    t := p.dec.GetType()
     if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) {
-        return p.mf.GetName()
+        return p.dec.GetName()
     }
     if p.fieldPos == -2 {
-        return p.mf.GetName() + "_count"
+        return p.dec.GetName() + "_count"
     }
     if p.fieldPos == -1 {
-        return p.mf.GetName() + "_sum"
+        return p.dec.GetName() + "_sum"
     }
     if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
-        return p.mf.GetName() + "_bucket"
+        return p.dec.GetName() + "_bucket"
     }
-    return p.mf.GetName()
+    return p.dec.GetName()
 }
 
 // getMagicLabel returns if a magic label ("quantile" or "le") is needed and, if
 // so, its name and value. It also sets p.fieldsDone if applicable.
 func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
+    // Native histogram or _count and _sum series.
     if p.state == EntryHistogram || p.fieldPos < 0 {
         return false, "", ""
     }
-    switch p.mf.GetType() {
+    switch p.dec.GetType() {
     case dto.MetricType_SUMMARY:
-        qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile()
+        qq := p.dec.GetSummary().GetQuantile()
         q := qq[p.fieldPos]
         p.fieldsDone = p.fieldPos == len(qq)-1
         return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
     case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
-        bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
+        bb := p.dec.GetHistogram().GetBucket()
         if p.fieldPos >= len(bb) {
             p.fieldsDone = true
             return true, model.BucketLabel, "+Inf"
@@ -579,30 +646,7 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
     return false, "", ""
 }
 
-var errInvalidVarint = errors.New("protobufparse: invalid varint encountered")
-
-// readDelimited is essentially doing what the function of the same name in
-// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is
-// specific to a MetricFamily, utilizes the more efficient gogo-protobuf
-// unmarshaling, and acts on a byte slice directly without any additional
-// staging buffers.
-func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
-    if len(b) == 0 {
-        return 0, io.EOF
-    }
-    messageLength, varIntLength := proto.DecodeVarint(b)
-    if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
-        return 0, errInvalidVarint
-    }
-    totalLength := varIntLength + int(messageLength)
-    if totalLength > len(b) {
-        return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
-    }
-    mf.Reset()
-    return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
-}
-
-// formatOpenMetricsFloat works like the usual Go string formatting of a fleat
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
 // but appends ".0" if the resulting number would otherwise contain neither a
 // "." nor an "e".
 func formatOpenMetricsFloat(f float64) string {
@@ -621,11 +665,15 @@ func formatOpenMetricsFloat(f float64) string {
     case math.IsInf(f, -1):
         return "-Inf"
     }
-    s := fmt.Sprint(f)
-    if strings.ContainsAny(s, "e.") {
-        return s
+    bp := floatFormatBufPool.Get().(*[]byte)
+    defer floatFormatBufPool.Put(bp)
+
+    *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+    if bytes.ContainsAny(*bp, "e.") {
+        return string(*bp)
     }
-    return s + ".0"
+    *bp = append(*bp, '.', '0')
+    return string(*bp)
 }
 
 // isNativeHistogram returns false iff the provided histograms has no spans at
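formatOpenMetricsFloat sits on the hot path for classic bucket and quantile labels, so the rewrite above formats into a pooled byte slice via strconv.AppendFloat instead of allocating through fmt.Sprint. The pool itself (floatFormatBufPool) is defined elsewhere in the file; a self-contained sketch of the same pattern, with an assumed pool definition:

    package main

    import (
        "bytes"
        "fmt"
        "strconv"
        "sync"
    )

    // A pool of reusable buffers; the real floatFormatBufPool is assumed to
    // look roughly like this.
    var floatFormatBufPool = sync.Pool{
        New: func() interface{} {
            // Large enough for the longest float64 rendering, so a pooled
            // buffer never has to grow.
            b := make([]byte, 0, 24)
            return &b
        },
    }

    func formatFloat(f float64) string {
        bp := floatFormatBufPool.Get().(*[]byte)
        defer floatFormatBufPool.Put(bp)

        *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
        if bytes.ContainsAny(*bp, "e.") {
            return string(*bp)
        }
        // Bare integers get a trailing ".0", as OpenMetrics expects.
        *bp = append(*bp, '.', '0')
        return string(*bp)
    }

    func main() {
        fmt.Println(formatFloat(20))      // "20.0", as in the le="20.0" test labels.
        fmt.Println(formatFloat(0.00038)) // "0.00038"
    }

Returning string(*bp) still copies once per call, but the scratch buffer itself is reused, which is where the allocation savings come from.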
diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go
index e323a6cc8f..35a4238fdb 100644
--- a/model/textparse/protobufparse_test.go
+++ b/model/textparse/protobufparse_test.go
@@ -16,8 +16,6 @@ package textparse
 import (
     "bytes"
     "encoding/binary"
-    "errors"
-    "io"
     "testing"
 
     "github.com/gogo/protobuf/proto"
@@ -27,12 +25,12 @@ import (
     "github.com/prometheus/prometheus/model/exemplar"
     "github.com/prometheus/prometheus/model/histogram"
     "github.com/prometheus/prometheus/model/labels"
-    "github.com/prometheus/prometheus/util/testutil"
-
     dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
 )
 
-func createTestProtoBuf(t *testing.T) *bytes.Buffer {
+func createTestProtoBuf(t testing.TB) *bytes.Buffer {
+    t.Helper()
+
     testMetricFamilies := []string{
         `name: "go_build_info"
 help: "Build information about the main Go module."
@@ -411,6 +409,49 @@ metric: <
 >
 
+`,
+        `name: "test_histogram3"
+help: "Similar histogram as before but now with integer buckets."
+type: HISTOGRAM
+metric: <
+  histogram: <
+    sample_count: 6
+    sample_sum: 50
+    bucket: <
+      cumulative_count: 2
+      upper_bound: -20
+    >
+    bucket: <
+      cumulative_count: 4
+      upper_bound: 20
+      exemplar: <
+        label: <
+          name: "dummyID"
+          value: "59727"
+        >
+        value: 15
+        timestamp: <
+          seconds: 1625851153
+          nanos: 146848499
+        >
+      >
+    >
+    bucket: <
+      cumulative_count: 6
+      upper_bound: 30
+      exemplar: <
+        label: <
+          name: "dummyID"
+          value: "5617"
+        >
+        value: 25
+      >
+    >
+    schema: 0
+    zero_threshold: 0
+  >
+>
+
 `,
         `name: "test_histogram_family"
 help: "Test histogram metric family with two very simple histograms."
@@ -541,8 +582,8 @@ metric: <
   counter: <
     value: 42
     created_timestamp: <
-      seconds: 1
-      nanos: 1
+      seconds: 1625851153
+      nanos: 146848499
     >
   >
 >
@@ -556,8 +597,8 @@ metric: <
     sample_count: 42
     sample_sum: 1.234
     created_timestamp: <
-      seconds: 1
-      nanos: 1
+      seconds: 1625851153
+      nanos: 146848499
     >
   >
 >
@@ -569,8 +610,8 @@ type: HISTOGRAM
 metric: <
   histogram: <
     created_timestamp: <
-      seconds: 1
-      nanos: 1
+      seconds: 1625851153
+      nanos: 146848499
     >
     positive_span: <
       offset: 0
@@ -586,8 +627,8 @@ type: GAUGE_HISTOGRAM
 metric: <
   histogram: <
     created_timestamp: <
-      seconds: 1
-      nanos: 1
+      seconds: 1625851153
+      nanos: 146848499
     >
     positive_span: <
       offset: 0
@@ -695,6 +736,70 @@ metric: <
 timestamp_ms: 1234568
 >
 
+`,
+
+        `name: "test_histogram_with_native_histogram_exemplars2"
+help: "Another histogram with native histogram exemplars."
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + exemplars: < + label: < + name: "dummyID" + value: "59780" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + timestamp_ms: 1234568 +> + `, } @@ -719,32 +824,17 @@ metric: < } func TestProtobufParse(t *testing.T) { - type parseResult struct { - lset labels.Labels - m string - t int64 - v float64 - typ model.MetricType - help string - unit string - comment string - shs *histogram.Histogram - fhs *histogram.FloatHistogram - e []exemplar.Exemplar - ct int64 - } - inputBuf := createTestProtoBuf(t) scenarios := []struct { name string parser Parser - expected []parseResult + expected []parsedEntry }{ { - name: "ignore classic buckets of native histograms", - parser: NewProtobufParser(inputBuf.Bytes(), false, labels.NewSymbolTable()), - expected: []parseResult{ + name: "parseClassicHistograms=false/enableTypeAndUnitLabels=false", + parser: NewProtobufParser(inputBuf.Bytes(), false, false, labels.NewSymbolTable()), + expected: []parsedEntry{ { m: "go_build_info", help: "Build information about the main Go module.", @@ -754,7 +844,7 @@ func TestProtobufParse(t *testing.T) { typ: model.MetricTypeGauge, }, { - m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + m: "go_build_info\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)", v: 1, lset: labels.FromStrings( "__name__", "go_build_info", @@ -766,6 +856,9 @@ func TestProtobufParse(t *testing.T) { { m: "go_memstats_alloc_bytes_total", help: "Total number of bytes allocated, even if freed.", + }, + { + m: "go_memstats_alloc_bytes_total", unit: "bytes", }, { @@ -778,7 +871,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "go_memstats_alloc_bytes_total", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, }, }, @@ -792,7 +885,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "something_untyped", - t: 1234567, + t: int64p(1234567), v: 42, lset: labels.FromStrings( "__name__", "something_untyped", @@ -808,7 +901,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -829,7 +922,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -843,7 +936,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 
175, @@ -865,7 +958,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -879,7 +972,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ Count: 175.0, ZeroCount: 2.0, @@ -900,7 +993,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -914,7 +1007,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 175.0, @@ -936,7 +1029,7 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, @@ -977,7 +1070,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram2_bucket", "le", "-0.00038", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, }, }, @@ -988,7 +1081,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram2_bucket", "le", "1.0", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, }, }, @@ -1000,6 +1093,66 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, + { + m: "test_histogram3", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram3", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram3_count", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_count", + ), + }, + { + m: "test_histogram3_sum", + v: 50, + lset: labels.FromStrings( + "__name__", "test_histogram3_sum", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff-20.0", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "-20.0", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff20.0", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "20.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff30.0", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "30.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "+Inf", + ), + }, { m: "test_histogram_family", help: "Test histogram metric family with two very simple histograms.", @@ -1093,7 +1246,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + m: "rpc_durations_seconds\xffquantile\xff0.5\xffservice\xffexponential", v: 6.442786329648548e-07, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1102,7 +1255,7 @@ 
func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + m: "rpc_durations_seconds\xffquantile\xff0.9\xffservice\xffexponential", v: 1.9435742936658396e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1111,7 +1264,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + m: "rpc_durations_seconds\xffquantile\xff0.99\xffservice\xffexponential", v: 4.0471608667037015e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1171,7 +1324,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_counter_with_createdtimestamp", v: 42, - ct: 1000, + ct: 1625851153146, lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), @@ -1187,7 +1340,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: 1000, + ct: 1625851153146, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), @@ -1195,7 +1348,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: 1000, + ct: 1625851153146, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), @@ -1210,7 +1363,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_createdtimestamp", - ct: 1000, + ct: 1625851153146, shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -1230,7 +1383,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gaugehistogram_with_createdtimestamp", - ct: 1000, + ct: 1625851153146, shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -1250,7 +1403,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_native_histogram_exemplars", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1271,27 +1424,734 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, + { + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + t: int64p(1234568), + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, }, }, { - name: "parse classic and native buckets", - parser: NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()), - expected: []parseResult{ - { // 0 + name: 
"parseClassicHistograms=false/enableTypeAndUnitLabels=true", + parser: NewProtobufParser(inputBuf.Bytes(), false, true, labels.NewSymbolTable()), + expected: []parsedEntry{ + { m: "go_build_info", help: "Build information about the main Go module.", }, - { // 1 + { m: "go_build_info", typ: model.MetricTypeGauge, }, - { // 2 - m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + { + m: "go_build_info\xff__type__\xffgauge\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "__type__", string(model.MetricTypeGauge), + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + }, + { + m: "go_memstats_alloc_bytes_total", + unit: "bytes", + }, + { + m: "go_memstats_alloc_bytes_total", + typ: model.MetricTypeCounter, + }, + { + m: "go_memstats_alloc_bytes_total\xff__type__\xffcounter\xff__unit__\xffbytes", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + "__type__", string(model.MetricTypeCounter), + "__unit__", "bytes", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { + m: "something_untyped", + typ: model.MetricTypeUnknown, + }, + { + m: "something_untyped", + t: int64p(1234567), + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram\xff__type__\xffhistogram", + t: int64p(1234568), + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + "__type__", string(model.MetricTypeHistogram), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { + m: "test_gauge_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_histogram\xff__type__\xffgaugehistogram", + t: int64p(1234568), + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + "__type__", string(model.MetricTypeGaugeHistogram), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 
-0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_float_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "test_float_histogram\xff__type__\xffhistogram", + t: int64p(1234568), + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + "__type__", string(model.MetricTypeHistogram), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { + m: "test_gauge_float_histogram", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_float_histogram\xff__type__\xffgaugehistogram", + t: int64p(1234568), + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + "__type__", string(model.MetricTypeGaugeHistogram), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram2_count\xff__type__\xffhistogram", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + "__type__", string(model.MetricTypeHistogram), + ), + }, + { + m: "test_histogram2_sum\xff__type__\xffhistogram", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + "__type__", string(model.MetricTypeHistogram), + ), + }, + { + m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "-0.00048", + ), + }, + { + m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "-0.00038", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "1.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 
-0.000295, HasTs: false}, + }, + }, + { + m: "test_histogram2_bucket\xff__type__\xffhistogram\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "+Inf", + ), + }, + { + m: "test_histogram3", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram3", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram3_count\xff__type__\xffhistogram", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_count", + "__type__", string(model.MetricTypeHistogram), + ), + }, + { + m: "test_histogram3_sum\xff__type__\xffhistogram", + v: 50, + lset: labels.FromStrings( + "__name__", "test_histogram3_sum", + "__type__", string(model.MetricTypeHistogram), + ), + }, + { + m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff-20.0", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "-20.0", + ), + }, + { + m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff20.0", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "20.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff30.0", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "30.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false}, + }, + }, + { + m: "test_histogram3_bucket\xff__type__\xffhistogram\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "__type__", string(model.MetricTypeHistogram), + "le", "+Inf", + ), + }, + { + m: "test_histogram_family", + help: "Test histogram metric family with two very simple histograms.", + }, + { + m: "test_histogram_family", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbar", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 5, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{2, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "__type__", string(model.MetricTypeHistogram), + "foo", "bar", + ), + }, + { + m: "test_histogram_family\xff__type__\xffhistogram\xfffoo\xffbaz", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + Count: 6, + Sum: 13.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + NegativeSpans: []histogram.Span{}, + PositiveBuckets: []int64{1, 4}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_family", + "__type__", string(model.MetricTypeHistogram), + "foo", "baz", + ), + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + help: "Test float histogram with a zero threshold of zero.", + }, + { + m: "test_float_histogram_with_zerothreshold_zero", + typ: model.MetricTypeHistogram, + }, + { + m: "test_float_histogram_with_zerothreshold_zero\xff__type__\xffhistogram", + fhs: &histogram.FloatHistogram{ + Count: 5.0, + Sum: 12.1, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 8, Length: 2}, + }, + PositiveBuckets: 
[]float64{2.0, 3.0}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram_with_zerothreshold_zero", + "__type__", string(model.MetricTypeHistogram), + ), + }, + { + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { + m: "rpc_durations_seconds", + typ: model.MetricTypeSummary, + }, + { + m: "rpc_durations_seconds_count\xff__type__\xffsummary\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "__type__", string(model.MetricTypeSummary), + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds_sum\xff__type__\xffsummary\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "__type__", string(model.MetricTypeSummary), + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.5\xffservice\xffexponential", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "__type__", string(model.MetricTypeSummary), + "quantile", "0.5", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.9\xffservice\xffexponential", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "__type__", string(model.MetricTypeSummary), + "quantile", "0.9", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xff__type__\xffsummary\xffquantile\xff0.99\xffservice\xffexponential", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__type__", string(model.MetricTypeSummary), + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { + m: "without_quantiles", + typ: model.MetricTypeSummary, + }, + { + m: "without_quantiles_count\xff__type__\xffsummary", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + "__type__", string(model.MetricTypeSummary), + ), + }, + { + m: "without_quantiles_sum\xff__type__\xffsummary", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + "__type__", string(model.MetricTypeSummary), + ), + }, + { + m: "empty_histogram", + help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", + }, + { + m: "empty_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "empty_histogram\xff__type__\xffhistogram", + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "empty_histogram", + "__type__", string(model.MetricTypeHistogram), + ), + }, + { + m: "test_counter_with_createdtimestamp", + help: "A counter with a created timestamp.", + }, + { + m: "test_counter_with_createdtimestamp", + typ: model.MetricTypeCounter, + }, + { + m: "test_counter_with_createdtimestamp\xff__type__\xffcounter", + v: 42, + ct: 1625851153146, + lset: labels.FromStrings( + "__name__", "test_counter_with_createdtimestamp", + "__type__", string(model.MetricTypeCounter), + ), + }, + { + m: "test_summary_with_createdtimestamp", + help: "A summary with a created timestamp.", + }, + { + m: "test_summary_with_createdtimestamp", + typ: model.MetricTypeSummary, + }, + { + m: 
"test_summary_with_createdtimestamp_count\xff__type__\xffsummary", + v: 42, + ct: 1625851153146, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_count", + "__type__", string(model.MetricTypeSummary), + ), + }, + { + m: "test_summary_with_createdtimestamp_sum\xff__type__\xffsummary", + v: 1.234, + ct: 1625851153146, + lset: labels.FromStrings( + "__name__", "test_summary_with_createdtimestamp_sum", + "__type__", string(model.MetricTypeSummary), + ), + }, + { + m: "test_histogram_with_createdtimestamp", + help: "A histogram with a created timestamp.", + }, + { + m: "test_histogram_with_createdtimestamp", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_createdtimestamp\xff__type__\xffhistogram", + ct: 1625851153146, + shs: &histogram.Histogram{ + CounterResetHint: histogram.UnknownCounterReset, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_createdtimestamp", + "__type__", string(model.MetricTypeHistogram), + ), + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + help: "A gauge histogram with a created timestamp.", + }, + { + m: "test_gaugehistogram_with_createdtimestamp", + typ: model.MetricTypeGaugeHistogram, + }, + { + m: "test_gaugehistogram_with_createdtimestamp\xff__type__\xffgaugehistogram", + ct: 1625851153146, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + PositiveSpans: []histogram.Span{}, + NegativeSpans: []histogram.Span{}, + }, + lset: labels.FromStrings( + "__name__", "test_gaugehistogram_with_createdtimestamp", + "__type__", string(model.MetricTypeGaugeHistogram), + ), + }, + { + m: "test_histogram_with_native_histogram_exemplars", + help: "A histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars\xff__type__\xffhistogram", + t: int64p(1234568), + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars", + "__type__", string(model.MetricTypeHistogram), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, + }, + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars2\xff__type__\xffhistogram", + t: int64p(1234568), + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: 
[]int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + "__type__", string(model.MetricTypeHistogram), + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + }, + }, + { + name: "parseClassicHistograms=true/enableTypeAndUnitLabels=false", + parser: NewProtobufParser(inputBuf.Bytes(), true, false, labels.NewSymbolTable()), + expected: []parsedEntry{ + { + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { + m: "go_build_info", + typ: model.MetricTypeGauge, + }, + { + m: "go_build_info\xffchecksum\xff\xffpath\xffgithub.com/prometheus/client_golang\xffversion\xff(devel)", v: 1, lset: labels.FromStrings( "__name__", "go_build_info", @@ -1300,51 +2160,55 @@ func TestProtobufParse(t *testing.T) { "version", "(devel)", ), }, - { // 3 + { m: "go_memstats_alloc_bytes_total", help: "Total number of bytes allocated, even if freed.", }, - { // 4 + { + m: "go_memstats_alloc_bytes_total", + unit: "bytes", + }, + { m: "go_memstats_alloc_bytes_total", typ: model.MetricTypeCounter, }, - { // 5 + { m: "go_memstats_alloc_bytes_total", v: 1.546544e+06, lset: labels.FromStrings( "__name__", "go_memstats_alloc_bytes_total", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, }, }, - { // 6 + { m: "something_untyped", help: "Just to test the untyped type.", }, - { // 7 + { m: "something_untyped", typ: model.MetricTypeUnknown, }, - { // 8 + { m: "something_untyped", - t: 1234567, + t: int64p(1234567), v: 42, lset: labels.FromStrings( "__name__", "something_untyped", ), }, - { // 9 + { m: "test_histogram", help: "Test histogram with many buckets removed to keep it manageable in size.", }, - { // 10 + { m: "test_histogram", typ: model.MetricTypeHistogram, }, - { // 11 + { m: "test_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -1365,79 +2229,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 12 + { m: "test_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_count", ), }, - { // 13 + { m: "test_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_histogram_sum", ), }, - { // 14 + { m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 15 + { m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 16 + { m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: 
labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 17 + { m: "test_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_histogram_bucket", "le", "+Inf", ), }, - { // 18 + { m: "test_gauge_histogram", help: "Like test_histogram but as gauge histogram.", }, - { // 19 + { m: "test_gauge_histogram", typ: model.MetricTypeGaugeHistogram, }, - { // 20 + { m: "test_gauge_histogram", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, Count: 175, @@ -1459,79 +2323,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 21 + { m: "test_gauge_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_histogram_count", ), }, - { // 22 + { m: "test_gauge_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_gauge_histogram_sum", ), }, - { // 23 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 24 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 25 + { m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 26 + { m: "test_gauge_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_histogram_bucket", "le", "+Inf", ), }, - { // 27 + { m: "test_float_histogram", help: "Test float histogram with many buckets removed to keep it manageable in size.", }, - { // 28 + { m: "test_float_histogram", typ: model.MetricTypeHistogram, }, - { // 29 + { m: "test_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ Count: 175.0, ZeroCount: 2.0, @@ -1552,79 +2416,79 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 30 + { m: "test_float_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_float_histogram_count", ), }, - { // 31 + { m: "test_float_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_float_histogram_sum", ), }, - { // 32 + { m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", 
"test_float_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 33 + { m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 34 + { m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 35 + { m: "test_float_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_float_histogram_bucket", "le", "+Inf", ), }, - { // 36 + { m: "test_gauge_float_histogram", help: "Like test_float_histogram but as gauge histogram.", }, - { // 37 + { m: "test_gauge_float_histogram", typ: model.MetricTypeGaugeHistogram, }, - { // 38 + { m: "test_gauge_float_histogram", - t: 1234568, + t: int64p(1234568), fhs: &histogram.FloatHistogram{ CounterResetHint: histogram.GaugeType, Count: 175.0, @@ -1646,91 +2510,91 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_gauge_float_histogram", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 39 + { m: "test_gauge_float_histogram_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_count", ), }, - { // 40 + { m: "test_gauge_float_histogram_sum", - t: 1234568, + t: int64p(1234568), v: 0.0008280461746287094, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_sum", ), }, - { // 41 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998", - t: 1234568, + t: int64p(1234568), v: 2, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0004899999999999998", ), }, - { // 42 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998", - t: 1234568, + t: int64p(1234568), v: 4, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0003899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 43 + { m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998", - t: 1234568, + t: int64p(1234568), v: 16, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "-0.0002899999999999998", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 44 + { m: "test_gauge_float_histogram_bucket\xffle\xff+Inf", - t: 1234568, + t: int64p(1234568), v: 175, lset: labels.FromStrings( "__name__", "test_gauge_float_histogram_bucket", "le", "+Inf", ), }, - { // 45 + { m: "test_histogram2", help: "Similar histogram as before but now without sparse buckets.", }, - { // 46 + { m: "test_histogram2", typ: model.MetricTypeHistogram, }, - { // 47 + { m: "test_histogram2_count", v: 175, lset: labels.FromStrings( "__name__", "test_histogram2_count", ), 
}, - { // 48 + { m: "test_histogram2_sum", v: 0.000828, lset: labels.FromStrings( "__name__", "test_histogram2_sum", ), }, - { // 49 + { m: "test_histogram2_bucket\xffle\xff-0.00048", v: 2, lset: labels.FromStrings( @@ -1738,29 +2602,29 @@ func TestProtobufParse(t *testing.T) { "le", "-0.00048", ), }, - { // 50 + { m: "test_histogram2_bucket\xffle\xff-0.00038", v: 4, lset: labels.FromStrings( "__name__", "test_histogram2_bucket", "le", "-0.00038", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, }, }, - { // 51 + { m: "test_histogram2_bucket\xffle\xff1.0", v: 16, lset: labels.FromStrings( "__name__", "test_histogram2_bucket", "le", "1.0", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, }, }, - { // 52 + { m: "test_histogram2_bucket\xffle\xff+Inf", v: 175, lset: labels.FromStrings( @@ -1768,15 +2632,75 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 53 + { + m: "test_histogram3", + help: "Similar histogram as before but now with integer buckets.", + }, + { + m: "test_histogram3", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram3_count", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_count", + ), + }, + { + m: "test_histogram3_sum", + v: 50, + lset: labels.FromStrings( + "__name__", "test_histogram3_sum", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff-20.0", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "-20.0", + ), + }, + { + m: "test_histogram3_bucket\xffle\xff20.0", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "20.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff30.0", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "30.0", + ), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false}, + }, + }, + { + m: "test_histogram3_bucket\xffle\xff+Inf", + v: 6, + lset: labels.FromStrings( + "__name__", "test_histogram3_bucket", + "le", "+Inf", + ), + }, + { m: "test_histogram_family", help: "Test histogram metric family with two very simple histograms.", }, - { // 54 + { m: "test_histogram_family", typ: model.MetricTypeHistogram, }, - { // 55 + { m: "test_histogram_family\xfffoo\xffbar", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -1794,7 +2718,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 56 + { m: "test_histogram_family_count\xfffoo\xffbar", v: 5, lset: labels.FromStrings( @@ -1802,7 +2726,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 57 + { m: "test_histogram_family_sum\xfffoo\xffbar", v: 12.1, lset: labels.FromStrings( @@ -1810,7 +2734,7 @@ func TestProtobufParse(t *testing.T) { "foo", "bar", ), }, - { // 58 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff1.1", v: 2, lset: labels.FromStrings( @@ -1819,7 +2743,7 @@ func TestProtobufParse(t *testing.T) { "le", "1.1", ), }, - { // 59 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff2.2", v: 3, lset: labels.FromStrings( @@ -1828,7 +2752,7 @@ func TestProtobufParse(t *testing.T) { "le", "2.2", ), }, - { // 60 + { m: "test_histogram_family_bucket\xfffoo\xffbar\xffle\xff+Inf", v: 5, lset: 
labels.FromStrings( @@ -1837,7 +2761,7 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 61 + { m: "test_histogram_family\xfffoo\xffbaz", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -1855,7 +2779,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 62 + { m: "test_histogram_family_count\xfffoo\xffbaz", v: 6, lset: labels.FromStrings( @@ -1863,7 +2787,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 63 + { m: "test_histogram_family_sum\xfffoo\xffbaz", v: 13.1, lset: labels.FromStrings( @@ -1871,7 +2795,7 @@ func TestProtobufParse(t *testing.T) { "foo", "baz", ), }, - { // 64 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff1.1", v: 1, lset: labels.FromStrings( @@ -1880,7 +2804,7 @@ func TestProtobufParse(t *testing.T) { "le", "1.1", ), }, - { // 65 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff2.2", v: 5, lset: labels.FromStrings( @@ -1889,7 +2813,7 @@ func TestProtobufParse(t *testing.T) { "le", "2.2", ), }, - { // 66 + { m: "test_histogram_family_bucket\xfffoo\xffbaz\xffle\xff+Inf", v: 6, lset: labels.FromStrings( @@ -1898,15 +2822,15 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, - { // 67 + { m: "test_float_histogram_with_zerothreshold_zero", help: "Test float histogram with a zero threshold of zero.", }, - { // 68 + { m: "test_float_histogram_with_zerothreshold_zero", typ: model.MetricTypeHistogram, }, - { // 69 + { m: "test_float_histogram_with_zerothreshold_zero", fhs: &histogram.FloatHistogram{ Count: 5.0, @@ -1922,15 +2846,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_float_histogram_with_zerothreshold_zero", ), }, - { // 70 + { m: "rpc_durations_seconds", help: "RPC latency distributions.", }, - { // 71 + { m: "rpc_durations_seconds", typ: model.MetricTypeSummary, }, - { // 72 + { m: "rpc_durations_seconds_count\xffservice\xffexponential", v: 262, lset: labels.FromStrings( @@ -1938,7 +2862,7 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 73 + { m: "rpc_durations_seconds_sum\xffservice\xffexponential", v: 0.00025551262820703587, lset: labels.FromStrings( @@ -1946,8 +2870,8 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 74 - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + { + m: "rpc_durations_seconds\xffquantile\xff0.5\xffservice\xffexponential", v: 6.442786329648548e-07, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1955,8 +2879,8 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 75 - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + { + m: "rpc_durations_seconds\xffquantile\xff0.9\xffservice\xffexponential", v: 1.9435742936658396e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1964,8 +2888,8 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 76 - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + { + m: "rpc_durations_seconds\xffquantile\xff0.99\xffservice\xffexponential", v: 4.0471608667037015e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1973,37 +2897,37 @@ func TestProtobufParse(t *testing.T) { "service", "exponential", ), }, - { // 77 + { m: "without_quantiles", help: "A summary without quantiles.", }, - { // 78 + { m: "without_quantiles", typ: model.MetricTypeSummary, }, - { // 79 + { m: "without_quantiles_count", v: 42, lset: labels.FromStrings( 
"__name__", "without_quantiles_count", ), }, - { // 80 + { m: "without_quantiles_sum", v: 1.234, lset: labels.FromStrings( "__name__", "without_quantiles_sum", ), }, - { // 78 + { m: "empty_histogram", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", }, - { // 79 + { m: "empty_histogram", typ: model.MetricTypeHistogram, }, - { // 80 + { m: "empty_histogram", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -2014,57 +2938,57 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, - { // 81 + { m: "test_counter_with_createdtimestamp", help: "A counter with a created timestamp.", }, - { // 82 + { m: "test_counter_with_createdtimestamp", typ: model.MetricTypeCounter, }, - { // 83 + { m: "test_counter_with_createdtimestamp", v: 42, - ct: 1000, + ct: 1625851153146, lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), }, - { // 84 + { m: "test_summary_with_createdtimestamp", help: "A summary with a created timestamp.", }, - { // 85 + { m: "test_summary_with_createdtimestamp", typ: model.MetricTypeSummary, }, - { // 86 + { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: 1000, + ct: 1625851153146, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), }, - { // 87 + { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: 1000, + ct: 1625851153146, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), }, - { // 88 + { m: "test_histogram_with_createdtimestamp", help: "A histogram with a created timestamp.", }, - { // 89 + { m: "test_histogram_with_createdtimestamp", typ: model.MetricTypeHistogram, }, - { // 90 + { m: "test_histogram_with_createdtimestamp", - ct: 1000, + ct: 1625851153146, shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -2074,17 +2998,17 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_createdtimestamp", ), }, - { // 91 + { m: "test_gaugehistogram_with_createdtimestamp", help: "A gauge histogram with a created timestamp.", }, - { // 92 + { m: "test_gaugehistogram_with_createdtimestamp", typ: model.MetricTypeGaugeHistogram, }, - { // 93 + { m: "test_gaugehistogram_with_createdtimestamp", - ct: 1000, + ct: 1625851153146, shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -2094,17 +3018,17 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_gaugehistogram_with_createdtimestamp", ), }, - { // 94 + { m: "test_histogram_with_native_histogram_exemplars", help: "A histogram with native histogram exemplars.", }, - { // 95 + { m: "test_histogram_with_native_histogram_exemplars", typ: model.MetricTypeHistogram, }, - { // 96 + { m: "test_histogram_with_native_histogram_exemplars", - t: 1234568, + t: int64p(1234568), shs: &histogram.Histogram{ Count: 175, ZeroCount: 2, @@ -2125,69 +3049,156 @@ func TestProtobufParse(t *testing.T) { lset: labels.FromStrings( "__name__", "test_histogram_with_native_histogram_exemplars", ), - e: []exemplar.Exemplar{ + es: []exemplar.Exemplar{ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, - { // 97 + { m: "test_histogram_with_native_histogram_exemplars_count", - t: 1234568, + t: int64p(1234568), v: 175, lset: 
 						"__name__", "test_histogram_with_native_histogram_exemplars_count",
 					),
 				},
-				{ // 98
+				{
 					m: "test_histogram_with_native_histogram_exemplars_sum",
-					t: 1234568,
+					t: int64p(1234568),
 					v: 0.0008280461746287094,
 					lset: labels.FromStrings(
 						"__name__", "test_histogram_with_native_histogram_exemplars_sum",
 					),
 				},
-				{ // 99
+				{
 					m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998",
-					t: 1234568,
+					t: int64p(1234568),
 					v: 2,
 					lset: labels.FromStrings(
 						"__name__", "test_histogram_with_native_histogram_exemplars_bucket",
 						"le", "-0.0004899999999999998",
 					),
 				},
-				{ // 100
+				{
 					m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998",
-					t: 1234568,
+					t: int64p(1234568),
 					v: 4,
 					lset: labels.FromStrings(
 						"__name__", "test_histogram_with_native_histogram_exemplars_bucket",
 						"le", "-0.0003899999999999998",
 					),
-					e: []exemplar.Exemplar{
+					es: []exemplar.Exemplar{
 						{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
 					},
 				},
-				{ // 101
+				{
 					m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998",
-					t: 1234568,
+					t: int64p(1234568),
 					v: 16,
 					lset: labels.FromStrings(
 						"__name__", "test_histogram_with_native_histogram_exemplars_bucket",
 						"le", "-0.0002899999999999998",
 					),
-					e: []exemplar.Exemplar{
+					es: []exemplar.Exemplar{
 						{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
 					},
 				},
-				{ // 102
+				{
 					m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf",
-					t: 1234568,
+					t: int64p(1234568),
 					v: 175,
 					lset: labels.FromStrings(
 						"__name__", "test_histogram_with_native_histogram_exemplars_bucket",
 						"le", "+Inf",
 					),
 				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2",
+					help: "Another histogram with native histogram exemplars.",
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2",
+					typ: model.MetricTypeHistogram,
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2",
+					t: int64p(1234568),
+					shs: &histogram.Histogram{
+						Count: 175,
+						ZeroCount: 2,
+						Sum: 0.0008280461746287094,
+						ZeroThreshold: 2.938735877055719e-39,
+						Schema: 3,
+						PositiveSpans: []histogram.Span{
+							{Offset: -161, Length: 1},
+							{Offset: 8, Length: 3},
+						},
+						NegativeSpans: []histogram.Span{
+							{Offset: -162, Length: 1},
+							{Offset: 23, Length: 4},
+						},
+						PositiveBuckets: []int64{1, 2, -1, -1},
+						NegativeBuckets: []int64{1, 3, -2, -1, 1},
+					},
+					lset: labels.FromStrings(
+						"__name__", "test_histogram_with_native_histogram_exemplars2",
+					),
+					es: []exemplar.Exemplar{
+						{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
+					},
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2_count",
+					t: int64p(1234568),
+					v: 175,
+					lset: labels.FromStrings(
+						"__name__", "test_histogram_with_native_histogram_exemplars2_count",
+					),
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2_sum",
+					t: int64p(1234568),
+					v: 0.0008280461746287094,
+					lset: labels.FromStrings(
+						"__name__", "test_histogram_with_native_histogram_exemplars2_sum",
+					),
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998",
+					t: int64p(1234568),
+					v: 2,
+					lset: labels.FromStrings(
+						"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+						"le", "-0.0004899999999999998",
+					),
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998",
+					t: int64p(1234568),
+					v: 4,
+					lset: labels.FromStrings(
+						"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+						"le", "-0.0003899999999999998",
+					),
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998",
+					t: int64p(1234568),
+					v: 16,
+					lset: labels.FromStrings(
+						"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+						"le", "-0.0002899999999999998",
+					),
+				},
+				{
+					m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf",
+					t: int64p(1234568),
+					v: 175,
+					lset: labels.FromStrings(
+						"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+						"le", "+Inf",
+					),
+				},
 			},
 		},
 	}
@@ -2195,94 +3206,11 @@ func TestProtobufParse(t *testing.T) {
 	for _, scenario := range scenarios {
 		t.Run(scenario.name, func(t *testing.T) {
 			var (
-				i   int
-				res labels.Labels
 				p   = scenario.parser
 				exp = scenario.expected
 			)
-
-			for {
-				et, err := p.Next()
-				if errors.Is(err, io.EOF) {
-					break
-				}
-				require.NoError(t, err)
-
-				switch et {
-				case EntrySeries:
-					m, ts, v := p.Series()
-
-					var e exemplar.Exemplar
-					p.Metric(&res)
-					eFound := p.Exemplar(&e)
-					ct := p.CreatedTimestamp()
-					require.Equal(t, exp[i].m, string(m), "i: %d", i)
-					if ts != nil {
-						require.Equal(t, exp[i].t, *ts, "i: %d", i)
-					} else {
-						require.Equal(t, int64(0), exp[i].t, "i: %d", i)
-					}
-					require.Equal(t, exp[i].v, v, "i: %d", i)
-					testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i)
-					if len(exp[i].e) == 0 {
-						require.False(t, eFound, "i: %d", i)
-					} else {
-						require.True(t, eFound, "i: %d", i)
-						testutil.RequireEqual(t, exp[i].e[0], e, "i: %d", i)
-						require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i)
-					}
-					if exp[i].ct != 0 {
-						require.NotNilf(t, ct, "i: %d", i)
-						require.Equal(t, exp[i].ct, *ct, "i: %d", i)
-					} else {
-						require.Nilf(t, ct, "i: %d", i)
-					}
-
-				case EntryHistogram:
-					m, ts, shs, fhs := p.Histogram()
-					p.Metric(&res)
-					require.Equal(t, exp[i].m, string(m), "i: %d", i)
-					if ts != nil {
-						require.Equal(t, exp[i].t, *ts, "i: %d", i)
-					} else {
-						require.Equal(t, int64(0), exp[i].t, "i: %d", i)
-					}
-					testutil.RequireEqual(t, exp[i].lset, res, "i: %d", i)
-					require.Equal(t, exp[i].m, string(m), "i: %d", i)
-					if shs != nil {
-						require.Equal(t, exp[i].shs, shs, "i: %d", i)
-					} else {
-						require.Equal(t, exp[i].fhs, fhs, "i: %d", i)
-					}
-					j := 0
-					for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ {
-						testutil.RequireEqual(t, exp[i].e[j], e, "i: %d", i)
-						e = exemplar.Exemplar{}
-					}
-					require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i)
-
-				case EntryType:
-					m, typ := p.Type()
-					require.Equal(t, exp[i].m, string(m), "i: %d", i)
-					require.Equal(t, exp[i].typ, typ, "i: %d", i)
-
-				case EntryHelp:
-					m, h := p.Help()
-					require.Equal(t, exp[i].m, string(m), "i: %d", i)
-					require.Equal(t, exp[i].help, string(h), "i: %d", i)
-
-				case EntryUnit:
-					m, u := p.Unit()
-					require.Equal(t, exp[i].m, string(m), "i: %d", i)
-					require.Equal(t, exp[i].unit, string(u), "i: %d", i)
-
-				case EntryComment:
-					require.Equal(t, exp[i].comment, string(p.Comment()), "i: %d", i)
-				}
-
-				i++
-			}
-			require.Len(t, exp, i)
+			got := testParse(t, p)
+			requireEntries(t, exp, got)
 		})
 	}
 }
diff --git a/model/textparse/testdata/1histogram.om.txt b/model/textparse/testdata/1histogram.om.txt
new file mode 100644
index 0000000000..1876168355
--- /dev/null
+++ b/model/textparse/testdata/1histogram.om.txt
@@ -0,0 +1,45 @@
+# HELP golang_manual_histogram_seconds This is a histogram with manually selected parameters
+# TYPE golang_manual_histogram_seconds histogram
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.005"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.01"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.025"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.05"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.1"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.25"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="0.5"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="1.0"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="2.5"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="5.0"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="10.0"} 1
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5001",le="+Inf"} 1
+golang_manual_histogram_seconds_sum{address="0.0.0.0",generation="20",port="5001"} 10.0
+golang_manual_histogram_seconds_count{address="0.0.0.0",generation="20",port="5001"} 1
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.005"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.01"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.025"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.05"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.1"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.25"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="0.5"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="1.0"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="2.5"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="5.0"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="10.0"} 1
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5002",le="+Inf"} 1
+golang_manual_histogram_seconds_sum{address="0.0.0.0",generation="20",port="5002"} 10.0
+golang_manual_histogram_seconds_count{address="0.0.0.0",generation="20",port="5002"} 1
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.005"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.01"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.025"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.05"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.1"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.25"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="0.5"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="1.0"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="2.5"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="5.0"} 0
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="10.0"} 1
+golang_manual_histogram_seconds_bucket{address="0.0.0.0",generation="20",port="5003",le="+Inf"} 1
+golang_manual_histogram_seconds_sum{address="0.0.0.0",generation="20",port="5003"} 10.0
+golang_manual_histogram_seconds_count{address="0.0.0.0",generation="20",port="5003"} 1
+# EOF
\ No newline at end of file
diff --git a/model/textparse/testdata/alltypes.237mfs.nometa.prom.txt b/model/textparse/testdata/alltypes.237mfs.nometa.prom.txt
new file mode 100644
index 0000000000..8724a7b837
--- /dev/null
+++ b/model/textparse/testdata/alltypes.237mfs.nometa.prom.txt
@@ -0,0 +1,1858 @@
+go_gc_cycles_automatic_gc_cycles_total 190932
+go_gc_cycles_forced_gc_cycles_total 0
+go_gc_cycles_total_gc_cycles_total 190932
+go_gc_duration_seconds{quantile="0"} 7.6238e-05
+go_gc_duration_seconds{quantile="0.25"} 0.00010188
+go_gc_duration_seconds{quantile="0.5"} 0.000135819
+go_gc_duration_seconds{quantile="0.75"} 0.000156061
+go_gc_duration_seconds{quantile="1"} 0.002389378
+go_gc_duration_seconds_sum 44.985611511
+go_gc_duration_seconds_count 190932
+go_gc_gogc_percent 75
+go_gc_gomemlimit_bytes 9.03676723e+08
+go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 2.279966416e+09
+go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 1.9429106442e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 3.2158220609e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 4.2309744198e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 4.3418919481e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 4.374631622e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 4.3917578245e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 4.396605609e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 4.4007501305e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 4.4020325917e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 4.4036187548e+10
+go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 4.4046288803e+10
+go_gc_heap_allocs_by_size_bytes_sum 5.937322571024e+12
+go_gc_heap_allocs_by_size_bytes_count 4.4046288803e+10
+go_gc_heap_allocs_bytes_total 5.937322571024e+12
+go_gc_heap_allocs_objects_total 4.4046288803e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 2.279951045e+09
+go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 1.9428965693e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 3.2157849717e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 4.2309204178e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 4.3418348856e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 4.3745739652e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 4.3916999773e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 4.3965477112e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 4.4006921621e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 4.4019746017e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 4.4035607466e+10
+go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 4.4045708586e+10
+go_gc_heap_frees_by_size_bytes_sum 5.937239047736e+12 +go_gc_heap_frees_by_size_bytes_count 4.4045708586e+10 +go_gc_heap_frees_bytes_total 5.937239047736e+12 +go_gc_heap_frees_objects_total 4.4045708586e+10 +go_gc_heap_goal_bytes 1.0365948e+08 +go_gc_heap_live_bytes 5.8791024e+07 +go_gc_heap_objects_objects 580217 +go_gc_heap_tiny_allocs_objects_total 8.306064403e+09 +go_gc_limiter_last_enabled_gc_cycle 160896 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 137212 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 208425 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 376121 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 381798 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 381863 +go_gc_pauses_seconds_bucket{le="+Inf"} 381864 +go_gc_pauses_seconds_sum 20.343611904000003 +go_gc_pauses_seconds_count 381864 +go_gc_scan_globals_bytes 555824 +go_gc_scan_heap_bytes 4.0986192e+07 +go_gc_scan_stack_bytes 477760 +go_gc_scan_total_bytes 4.2019776e+07 +go_gc_stack_starting_size_bytes 4096 +go_goroutines 151 +go_info{version="go1.23.4"} 1 +go_memstats_alloc_bytes 8.3523288e+07 +go_memstats_alloc_bytes_total 5.937322571024e+12 +go_memstats_buck_hash_sys_bytes 9.35076e+06 +go_memstats_frees_total 5.2351772989e+10 +go_memstats_gc_sys_bytes 5.450544e+06 +go_memstats_heap_alloc_bytes 8.3523288e+07 +go_memstats_heap_idle_bytes 1.23691008e+08 +go_memstats_heap_inuse_bytes 9.56416e+07 +go_memstats_heap_objects 580217 +go_memstats_heap_released_bytes 1.05897984e+08 +go_memstats_heap_sys_bytes 2.19332608e+08 +go_memstats_last_gc_time_seconds 1.738949372347471e+09 +go_memstats_mallocs_total 5.2352353206e+10 +go_memstats_mcache_inuse_bytes 1200 +go_memstats_mcache_sys_bytes 15600 +go_memstats_mspan_inuse_bytes 1.21232e+06 +go_memstats_mspan_sys_bytes 2.13792e+06 +go_memstats_next_gc_bytes 1.0365948e+08 +go_memstats_other_sys_bytes 803696 +go_memstats_stack_inuse_bytes 2.818048e+06 +go_memstats_stack_sys_bytes 2.818048e+06 +go_memstats_sys_bytes 2.39909176e+08 +go_sched_gomaxprocs_threads 1 +go_sched_goroutines_goroutines 151 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 5.0101035e+07 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 7.4291762e+07 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 8.1386236e+07 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 8.2116161e+07 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 8.2802009e+07 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 8.6274195e+07 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 8.6724074e+07 +go_sched_latencies_seconds_bucket{le="+Inf"} 8.6726596e+07 +go_sched_latencies_seconds_sum 8266.758178496 +go_sched_latencies_seconds_count 8.6726596e+07 +go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-07"} 236272 +go_sched_pauses_stopping_gc_seconds_bucket{le="7.167999999999999e-06"} 380799 +go_sched_pauses_stopping_gc_seconds_bucket{le="8.191999999999999e-05"} 381741 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.0009175039999999999"} 381807 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.010485759999999998"} 381862 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.11744051199999998"} 381864 +go_sched_pauses_stopping_gc_seconds_bucket{le="+Inf"} 381864 +go_sched_pauses_stopping_gc_seconds_sum 0.191211904 
+go_sched_pauses_stopping_gc_seconds_count 381864 +go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="7.167999999999999e-06"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="8.191999999999999e-05"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.0009175039999999999"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.010485759999999998"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.11744051199999998"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="+Inf"} 0 +go_sched_pauses_stopping_other_seconds_sum 0 +go_sched_pauses_stopping_other_seconds_count 0 +go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_total_gc_seconds_bucket{le="7.167999999999999e-06"} 137212 +go_sched_pauses_total_gc_seconds_bucket{le="8.191999999999999e-05"} 208425 +go_sched_pauses_total_gc_seconds_bucket{le="0.0009175039999999999"} 376121 +go_sched_pauses_total_gc_seconds_bucket{le="0.010485759999999998"} 381798 +go_sched_pauses_total_gc_seconds_bucket{le="0.11744051199999998"} 381863 +go_sched_pauses_total_gc_seconds_bucket{le="+Inf"} 381864 +go_sched_pauses_total_gc_seconds_sum 20.343611904000003 +go_sched_pauses_total_gc_seconds_count 381864 +go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_total_other_seconds_bucket{le="7.167999999999999e-06"} 0 +go_sched_pauses_total_other_seconds_bucket{le="8.191999999999999e-05"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.0009175039999999999"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.010485759999999998"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.11744051199999998"} 0 +go_sched_pauses_total_other_seconds_bucket{le="+Inf"} 0 +go_sched_pauses_total_other_seconds_sum 0 +go_sched_pauses_total_other_seconds_count 0 +go_sync_mutex_wait_total_seconds_total 628.29966272 +go_threads 10 +net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 2 +net_conntrack_dialer_conn_attempted_total{dialer_name="blackbox"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="caddy"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="cadvisor"} 2 +net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_attempted_total{dialer_name="grafana"} 7 +net_conntrack_dialer_conn_attempted_total{dialer_name="node"} 5 +net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="random"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="blackbox"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="caddy"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="cadvisor"} 1 +net_conntrack_dialer_conn_closed_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="grafana"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="node"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="prometheus"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="random"} 0 +net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 2 +net_conntrack_dialer_conn_established_total{dialer_name="blackbox"} 1 
+net_conntrack_dialer_conn_established_total{dialer_name="caddy"} 1 +net_conntrack_dialer_conn_established_total{dialer_name="cadvisor"} 2 +net_conntrack_dialer_conn_established_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_established_total{dialer_name="grafana"} 5 +net_conntrack_dialer_conn_established_total{dialer_name="node"} 5 +net_conntrack_dialer_conn_established_total{dialer_name="prometheus"} 1 +net_conntrack_dialer_conn_established_total{dialer_name="random"} 4 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="unknown"} 2 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="unknown"} 0 +net_conntrack_listener_conn_accepted_total{listener_name="http"} 
561771 +net_conntrack_listener_conn_closed_total{listener_name="http"} 561723 +process_cpu_seconds_total 96501.42 +process_max_fds 65000 +process_network_receive_bytes_total 9.89686846606e+11 +process_network_transmit_bytes_total 3.743928187168e+12 +process_open_fds 124 +process_resident_memory_bytes 1.72822528e+08 +process_start_time_seconds 1.73806369023e+09 +process_virtual_memory_bytes 4.608667648e+09 +process_virtual_memory_max_bytes 1.8446744073709552e+19 +prometheus_api_notification_active_subscribers 2 +prometheus_api_notification_updates_dropped_total 0 +prometheus_api_notification_updates_sent_total 5 +prometheus_api_remote_read_queries 0 +prometheus_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.23.4",revision="7086161a93b262aa0949dbf2aba15a5a7b13e0a3",tags="netgo,builtinassets,stringlabels",version="3.1.0"} 1 +prometheus_config_last_reload_success_timestamp_seconds 1.7380636976181264e+09 +prometheus_config_last_reload_successful 1 +prometheus_engine_queries 0 +prometheus_engine_queries_concurrent_max 20 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 8.075e-05 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 0.000917449 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 0.009315769 +prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 12506.67007997419 +prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 8.714071e+06 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 2.228e-05 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 6.2819e-05 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 0.000399637 +prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 490.326247638047 +prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 8.714071e+06 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 4.628e-06 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 1.6082e-05 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 4.1174e-05 +prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 8720.662071224393 +prometheus_engine_query_duration_seconds_count{slice="queue_time"} 1.7428526e+07 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} 7.83e-07 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} 1.994e-06 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} 1.458e-05 +prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 2.337879348999977 +prometheus_engine_query_duration_seconds_count{slice="result_sort"} 1.208154e+06 +prometheus_engine_query_log_enabled 0 +prometheus_engine_query_log_failures_total 0 +prometheus_engine_query_samples_total 9.1183747239e+10 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.1"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.2"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.4"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="3"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="8"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="20"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="60"} 688 
+prometheus_http_request_duration_seconds_bucket{handler="/",le="120"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 688 +prometheus_http_request_duration_seconds_sum{handler="/"} 0.026826932000000022 +prometheus_http_request_duration_seconds_count{handler="/"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.1"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.2"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.4"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="1"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="3"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="8"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="20"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="60"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="120"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="+Inf"} 29524 +prometheus_http_request_duration_seconds_sum{handler="/-/healthy"} 0.7400570460000002 +prometheus_http_request_duration_seconds_count{handler="/-/healthy"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/-/ready"} 0.005040123000000002 +prometheus_http_request_duration_seconds_count{handler="/-/ready"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.1"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.2"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.4"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="1"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="3"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="8"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="20"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="60"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="120"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="+Inf"} 48 +prometheus_http_request_duration_seconds_sum{handler="/alerts"} 0.011801602999999999 +prometheus_http_request_duration_seconds_count{handler="/alerts"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.1"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.2"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.4"} 27 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="1"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="3"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="8"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="20"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="60"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="120"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="+Inf"} 27 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/*path"} 0.001724389 +prometheus_http_request_duration_seconds_count{handler="/api/v1/*path"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.1"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.2"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.4"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="1"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="3"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="8"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="20"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="60"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="120"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/alertmanagers"} 0.042492975999999995 +prometheus_http_request_duration_seconds_count{handler="/api/v1/alertmanagers"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.1"} 14630 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.2"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.4"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="1"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="3"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="8"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="20"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="60"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="120"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="+Inf"} 14635 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/alerts"} 19.028669391999912 +prometheus_http_request_duration_seconds_count{handler="/api/v1/alerts"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="8"} 4 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/format_query"} 0.023786675 +prometheus_http_request_duration_seconds_count{handler="/api/v1/format_query"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 17773 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 17860 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 17939 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 17970 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 99.95326355699925 +prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.1"} 4905 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.2"} 4912 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.4"} 4916 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="1"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="3"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="8"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="20"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="60"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="120"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/labels"} 12.963155722000025 +prometheus_http_request_duration_seconds_count{handler="/api/v1/labels"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.1"} 2073 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.2"} 2091 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.4"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="1"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="3"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="8"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="20"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="60"} 2093 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="120"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/metadata"} 16.90794258600001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/metadata"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.1"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.2"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.4"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="1"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="3"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="8"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="20"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="60"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="120"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications"} 0.029363403 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.1"} 3 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.4"} 11 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="1"} 29 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="3"} 79 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="8"} 116 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="20"} 187 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="60"} 224 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="120"} 234 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="+Inf"} 263 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications/live"} 85150.78366682903 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications/live"} 263 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.1"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.2"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.4"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="1"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="3"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="8"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="20"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="60"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="120"} 265 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/parse_query"} 0.7265021480000003 +prometheus_http_request_duration_seconds_count{handler="/api/v1/parse_query"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 3051 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 3082 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 3173 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 3213 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 3216 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 3217 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 70.23571750499987 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.1"} 831 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.2"} 839 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.4"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="1"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="3"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="8"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="20"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="60"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="120"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_exemplars"} 3.6569132899999968 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 116338 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 116784 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 117924 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 118147 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 118155 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 118157 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 832.1240439999854 
+prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.2"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.4"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="3"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="8"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="20"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="60"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="120"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/read"} 0.07308876400000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/read"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.1"} 88415 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.2"} 88627 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.4"} 88659 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="1"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="3"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="8"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="20"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="60"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="120"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/rules"} 629.5237672670012 +prometheus_http_request_duration_seconds_count{handler="/api/v1/rules"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.2"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.4"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="3"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="8"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="20"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="60"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="120"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 142 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/scrape_pools"} 0.17501777799999996 +prometheus_http_request_duration_seconds_count{handler="/api/v1/scrape_pools"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.1"} 1217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.2"} 1236 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.4"} 1244 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="1"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="3"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="8"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="20"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="60"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="120"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/series"} 19.03228310300001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/series"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.1"} 4412 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.2"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.4"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="1"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="3"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="8"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="20"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="60"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="120"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/buildinfo"} 5.222974727000008 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.1"} 84669 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.2"} 84716 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.4"} 84721 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="1"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="3"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="8"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="20"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="60"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="120"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/config"} 228.93575751199964 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/config"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.1"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.2"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.4"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="1"} 32 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="3"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="8"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="20"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="60"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="120"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/flags"} 0.093302815 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/flags"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.1"} 862 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.2"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.4"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="1"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="3"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="8"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="20"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="60"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="120"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/runtimeinfo"} 8.009312817000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.2"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.4"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="3"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="8"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="20"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="60"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="120"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/tsdb"} 1.3378732679999994 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/tsdb"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="3"} 49 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/walreplay"} 0.034643267 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.1"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.2"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.4"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="1"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="3"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="8"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="20"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="60"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="120"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets"} 0.5432347889999999 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.2"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.4"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="3"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="8"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="20"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="60"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="120"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets/metadata"} 0.043789973 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.1"} 385 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.2"} 481 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.4"} 528 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="1"} 612 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="3"} 634 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="8"} 640 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="20"} 641 
+prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="60"} 642 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="120"} 643 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="+Inf"} 643 +prometheus_http_request_duration_seconds_sum{handler="/assets/*filepath"} 280.0660388799997 +prometheus_http_request_duration_seconds_count{handler="/assets/*filepath"} 643 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.2"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.4"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="3"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="8"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="20"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="60"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="120"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 +prometheus_http_request_duration_seconds_sum{handler="/classic/static/*filepath"} 0.006113046000000001 +prometheus_http_request_duration_seconds_count{handler="/classic/static/*filepath"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.2"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.4"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="3"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="8"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="20"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="60"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="120"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="+Inf"} 13 +prometheus_http_request_duration_seconds_sum{handler="/config"} 0.0024490289999999997 +prometheus_http_request_duration_seconds_count{handler="/config"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.1"} 33 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.2"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.4"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="1"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="3"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="8"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="20"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="60"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="120"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="+Inf"} 34 
+prometheus_http_request_duration_seconds_sum{handler="/consoles/*filepath"} 0.5689515199999999 +prometheus_http_request_duration_seconds_count{handler="/consoles/*filepath"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/debug/*subpath"} 0.086499352 +prometheus_http_request_duration_seconds_count{handler="/debug/*subpath"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.2"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.4"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="3"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="8"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="20"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="60"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="120"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_request_duration_seconds_sum{handler="/favicon.ico"} 0.05591882500000002 +prometheus_http_request_duration_seconds_count{handler="/favicon.ico"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.2"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.4"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="3"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="8"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="20"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="60"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="120"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_request_duration_seconds_sum{handler="/favicon.svg"} 0.3058455699999999 +prometheus_http_request_duration_seconds_count{handler="/favicon.svg"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.4"} 4 
+prometheus_http_request_duration_seconds_bucket{handler="/federate",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/federate"} 0.05996980699999999 +prometheus_http_request_duration_seconds_count{handler="/federate"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.2"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.4"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="3"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="8"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="20"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="60"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="120"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_request_duration_seconds_sum{handler="/flags"} 0.001586781 +prometheus_http_request_duration_seconds_count{handler="/flags"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.2"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.4"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="3"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="8"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="20"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="60"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="120"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_request_duration_seconds_sum{handler="/graph"} 0.032583203000000005 +prometheus_http_request_duration_seconds_count{handler="/graph"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 3.988654e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 4.052461e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 4.057771e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 4.058809e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 4.059071e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 +prometheus_http_request_duration_seconds_sum{handler="/metrics"} 
107584.12561354278 +prometheus_http_request_duration_seconds_count{handler="/metrics"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.2"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.4"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="3"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="8"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="20"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="60"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="120"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_request_duration_seconds_sum{handler="/query"} 0.5220201759999993 +prometheus_http_request_duration_seconds_count{handler="/query"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.1"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.2"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.4"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="1"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="3"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="8"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="20"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="60"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="120"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_request_duration_seconds_sum{handler="/rules"} 2.776021043000005 +prometheus_http_request_duration_seconds_count{handler="/rules"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.2"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.4"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="3"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="8"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="20"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="60"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="120"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_request_duration_seconds_sum{handler="/service-discovery"} 0.004057062 +prometheus_http_request_duration_seconds_count{handler="/service-discovery"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.1"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.2"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.4"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="1"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="3"} 46 
+prometheus_http_request_duration_seconds_bucket{handler="/status",le="8"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="20"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="60"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="120"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_request_duration_seconds_sum{handler="/status"} 0.010107473 +prometheus_http_request_duration_seconds_count{handler="/status"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.2"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.4"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="3"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="8"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="20"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="60"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="120"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_request_duration_seconds_sum{handler="/targets"} 0.009001180000000001 +prometheus_http_request_duration_seconds_count{handler="/targets"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/tsdb-status"} 0.018204165 +prometheus_http_request_duration_seconds_count{handler="/tsdb-status"} 49 +prometheus_http_requests_total{code="200",handler="/"} 0 +prometheus_http_requests_total{code="200",handler="/-/healthy"} 29524 +prometheus_http_requests_total{code="200",handler="/-/quit"} 0 +prometheus_http_requests_total{code="200",handler="/-/ready"} 49 +prometheus_http_requests_total{code="200",handler="/-/reload"} 0 +prometheus_http_requests_total{code="200",handler="/alertmanager-discovery"} 0 +prometheus_http_requests_total{code="200",handler="/alerts"} 48 +prometheus_http_requests_total{code="200",handler="/api/v1/*path"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/clean_tombstones"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/delete_series"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/snapshot"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/alertmanagers"} 8 +prometheus_http_requests_total{code="200",handler="/api/v1/alerts"} 14635 +prometheus_http_requests_total{code="200",handler="/api/v1/format_query"} 3 
+prometheus_http_requests_total{code="200",handler="/api/v1/label/:name/values"} 14679 +prometheus_http_requests_total{code="200",handler="/api/v1/labels"} 4915 +prometheus_http_requests_total{code="200",handler="/api/v1/metadata"} 2093 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications"} 12 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications/live"} 4529 +prometheus_http_requests_total{code="200",handler="/api/v1/otlp/v1/metrics"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/parse_query"} 247 +prometheus_http_requests_total{code="200",handler="/api/v1/query"} 326411 +prometheus_http_requests_total{code="200",handler="/api/v1/query_exemplars"} 841 +prometheus_http_requests_total{code="200",handler="/api/v1/query_range"} 1.183734e+06 +prometheus_http_requests_total{code="200",handler="/api/v1/read"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/rules"} 88658 +prometheus_http_requests_total{code="200",handler="/api/v1/scrape_pools"} 142 +prometheus_http_requests_total{code="200",handler="/api/v1/series"} 1235 +prometheus_http_requests_total{code="200",handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_requests_total{code="200",handler="/api/v1/status/config"} 84722 +prometheus_http_requests_total{code="200",handler="/api/v1/status/flags"} 32 +prometheus_http_requests_total{code="200",handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_requests_total{code="200",handler="/api/v1/status/tsdb"} 94 +prometheus_http_requests_total{code="200",handler="/api/v1/status/walreplay"} 49 +prometheus_http_requests_total{code="200",handler="/api/v1/targets"} 191 +prometheus_http_requests_total{code="200",handler="/api/v1/targets/metadata"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/write"} 0 +prometheus_http_requests_total{code="200",handler="/assets/*filepath"} 2213 +prometheus_http_requests_total{code="200",handler="/classic/static/*filepath"} 0 +prometheus_http_requests_total{code="200",handler="/config"} 13 +prometheus_http_requests_total{code="200",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="200",handler="/debug/*subpath"} 3 +prometheus_http_requests_total{code="200",handler="/favicon.ico"} 0 +prometheus_http_requests_total{code="200",handler="/favicon.svg"} 769 +prometheus_http_requests_total{code="200",handler="/federate"} 4 +prometheus_http_requests_total{code="200",handler="/flags"} 9 +prometheus_http_requests_total{code="200",handler="/graph"} 0 +prometheus_http_requests_total{code="200",handler="/manifest.json"} 0 +prometheus_http_requests_total{code="200",handler="/metrics"} 4.059092e+06 +prometheus_http_requests_total{code="200",handler="/query"} 1774 +prometheus_http_requests_total{code="200",handler="/rules"} 8673 +prometheus_http_requests_total{code="200",handler="/service-discovery"} 20 +prometheus_http_requests_total{code="200",handler="/status"} 46 +prometheus_http_requests_total{code="200",handler="/targets"} 39 +prometheus_http_requests_total{code="200",handler="/tsdb-status"} 49 +prometheus_http_requests_total{code="200",handler="/version"} 0 +prometheus_http_requests_total{code="204",handler="/api/v1/*path"} 27 +prometheus_http_requests_total{code="204",handler="/api/v1/notifications/live"} 5 +prometheus_http_requests_total{code="301",handler="/debug/*subpath"} 1 +prometheus_http_requests_total{code="302",handler="/"} 688 +prometheus_http_requests_total{code="302",handler="/graph"} 751 
+prometheus_http_requests_total{code="400",handler="/api/v1/format_query"} 1 +prometheus_http_requests_total{code="400",handler="/api/v1/label/:name/values"} 3292 +prometheus_http_requests_total{code="400",handler="/api/v1/labels"} 6 +prometheus_http_requests_total{code="400",handler="/api/v1/parse_query"} 18 +prometheus_http_requests_total{code="400",handler="/api/v1/query"} 155 +prometheus_http_requests_total{code="400",handler="/api/v1/query_range"} 263 +prometheus_http_requests_total{code="400",handler="/api/v1/rules"} 4 +prometheus_http_requests_total{code="400",handler="/api/v1/series"} 11 +prometheus_http_requests_total{code="400",handler="/api/v1/targets/metadata"} 2 +prometheus_http_requests_total{code="404",handler="/assets/*filepath"} 12 +prometheus_http_requests_total{code="404",handler="/classic/static/*filepath"} 103 +prometheus_http_requests_total{code="404",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="404",handler="/favicon.ico"} 177 +prometheus_http_requests_total{code="416",handler="/favicon.svg"} 1 +prometheus_http_requests_total{code="422",handler="/api/v1/query"} 114 +prometheus_http_requests_total{code="422",handler="/api/v1/query_range"} 31 +prometheus_http_requests_total{code="499",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="499",handler="/api/v1/query_range"} 20 +prometheus_http_requests_total{code="503",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="503",handler="/api/v1/query_range"} 4 +prometheus_http_requests_total{code="503",handler="/api/v1/status/config"} 1 +prometheus_http_response_size_bytes_bucket{handler="/",le="100"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="10000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="100000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+06"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+07"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+08"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+09"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="+Inf"} 688 +prometheus_http_response_size_bytes_sum{handler="/"} 19952 +prometheus_http_response_size_bytes_count{handler="/"} 688 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="10000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+06"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+07"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+08"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+09"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="+Inf"} 29524 +prometheus_http_response_size_bytes_sum{handler="/-/healthy"} 885720 +prometheus_http_response_size_bytes_count{handler="/-/healthy"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100000"} 49 
+prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/-/ready"} 1372 +prometheus_http_response_size_bytes_count{handler="/-/ready"} 49 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="10000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+06"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+07"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+08"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+09"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="+Inf"} 48 +prometheus_http_response_size_bytes_sum{handler="/alerts"} 84096 +prometheus_http_response_size_bytes_count{handler="/alerts"} 48 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="10000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+06"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+07"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+08"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+09"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="+Inf"} 27 +prometheus_http_response_size_bytes_sum{handler="/api/v1/*path"} 0 +prometheus_http_response_size_bytes_count{handler="/api/v1/*path"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="10000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+06"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+07"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+08"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+09"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alertmanagers"} 1064 +prometheus_http_response_size_bytes_count{handler="/api/v1/alertmanagers"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="10000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100000"} 14635 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+06"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+07"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+08"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+09"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="+Inf"} 14635 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alerts"} 1.0260926e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/alerts"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="10000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/api/v1/format_query"} 495 +prometheus_http_response_size_bytes_count{handler="/api/v1/format_query"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 8167 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 15939 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 17901 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 2.0009454e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100"} 102 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1000"} 4908 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="10000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+06"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+07"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+08"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+09"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_response_size_bytes_sum{handler="/api/v1/labels"} 3.16046e+06 +prometheus_http_response_size_bytes_count{handler="/api/v1/labels"} 4921 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100"} 170 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="10000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100000"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+06"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+07"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+08"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+09"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_response_size_bytes_sum{handler="/api/v1/metadata"} 1.5950362e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/metadata"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="10000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+06"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+07"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+08"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+09"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications"} 360 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100"} 4529 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="10000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+06"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+07"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+08"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+09"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="+Inf"} 4534 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications/live"} 1365 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications/live"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="10000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+06"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+07"} 265 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+08"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+09"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_response_size_bytes_sum{handler="/api/v1/parse_query"} 67620 +prometheus_http_response_size_bytes_count{handler="/api/v1/parse_query"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 25147 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 325005 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 325143 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 326635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 326692 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 326694 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 2.34481756e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="10000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+06"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+07"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+08"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+09"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_exemplars"} 50460 +prometheus_http_response_size_bytes_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 69552 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 917165 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 1.152103e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 1.183391e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 1.183621e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 1.184052e+06 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 1.869092376e+09 +prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100"} 0 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="10000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+06"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+07"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+08"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+09"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_response_size_bytes_sum{handler="/api/v1/read"} 50800 +prometheus_http_response_size_bytes_count{handler="/api/v1/read"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="10000"} 88656 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100000"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+06"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+07"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+08"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+09"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_response_size_bytes_sum{handler="/api/v1/rules"} 7.01925022e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/rules"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="10000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+06"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+07"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+08"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+09"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 142 +prometheus_http_response_size_bytes_sum{handler="/api/v1/scrape_pools"} 18318 +prometheus_http_response_size_bytes_count{handler="/api/v1/scrape_pools"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100"} 1011 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1000"} 1233 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="10000"} 1244 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100000"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+06"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+07"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+08"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+09"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_response_size_bytes_sum{handler="/api/v1/series"} 235829 
+prometheus_http_response_size_bytes_count{handler="/api/v1/series"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="10000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+06"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+07"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+08"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+09"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/buildinfo"} 807579 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="10000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+06"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+07"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+08"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+09"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/config"} 6.3710963e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/config"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="10000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+06"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+07"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+08"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+09"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/flags"} 31968 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/flags"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="10000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+06"} 863 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+07"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+08"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+09"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/runtimeinfo"} 240400 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1000"} 92 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="10000"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100000"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+06"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+07"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+08"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+09"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/tsdb"} 66441 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/tsdb"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/walreplay"} 3381 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1000"} 184 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="10000"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100000"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+06"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+07"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+08"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+09"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets"} 191150 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100"} 1 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="10000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+06"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+07"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+08"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+09"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets/metadata"} 2736 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="10000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+06"} 958 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+07"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+08"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+09"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="+Inf"} 2225 +prometheus_http_response_size_bytes_sum{handler="/assets/*filepath"} 3.525958804e+09 +prometheus_http_response_size_bytes_count{handler="/assets/*filepath"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="10000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+06"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+07"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+08"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+09"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 +prometheus_http_response_size_bytes_sum{handler="/classic/static/*filepath"} 1957 +prometheus_http_response_size_bytes_count{handler="/classic/static/*filepath"} 103 +prometheus_http_response_size_bytes_bucket{handler="/config",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="10000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="100000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+06"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+07"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+08"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+09"} 13 
+prometheus_http_response_size_bytes_bucket{handler="/config",le="+Inf"} 13 +prometheus_http_response_size_bytes_sum{handler="/config"} 22776 +prometheus_http_response_size_bytes_count{handler="/config"} 13 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100"} 17 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1000"} 17 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="10000"} 31 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100000"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+06"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+07"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+08"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+09"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="+Inf"} 34 +prometheus_http_response_size_bytes_sum{handler="/consoles/*filepath"} 175556 +prometheus_http_response_size_bytes_count{handler="/consoles/*filepath"} 34 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="10000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/debug/*subpath"} 7062 +prometheus_http_response_size_bytes_count{handler="/debug/*subpath"} 4 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="10000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+06"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+07"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+08"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+09"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_response_size_bytes_sum{handler="/favicon.ico"} 3363 +prometheus_http_response_size_bytes_count{handler="/favicon.ico"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="10000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+06"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+07"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+08"} 770 
+prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+09"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_response_size_bytes_sum{handler="/favicon.svg"} 2.135541e+06 +prometheus_http_response_size_bytes_count{handler="/favicon.svg"} 770 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="10000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/federate"} 28439 +prometheus_http_response_size_bytes_count{handler="/federate"} 4 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="10000"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100000"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+06"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+07"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+08"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+09"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_response_size_bytes_sum{handler="/flags"} 15768 +prometheus_http_response_size_bytes_count{handler="/flags"} 9 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="100"} 130 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1000"} 744 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="10000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="100000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+06"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+07"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+08"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+09"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_response_size_bytes_sum{handler="/graph"} 164775 +prometheus_http_response_size_bytes_count{handler="/graph"} 751 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 4.059088e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 
+prometheus_http_response_size_bytes_sum{handler="/metrics"} 8.2515439707e+10 +prometheus_http_response_size_bytes_count{handler="/metrics"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="10000"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="100000"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+06"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+07"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+08"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+09"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_response_size_bytes_sum{handler="/query"} 3.108048e+06 +prometheus_http_response_size_bytes_count{handler="/query"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="10000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+06"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+07"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+08"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+09"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_response_size_bytes_sum{handler="/rules"} 1.5195096e+07 +prometheus_http_response_size_bytes_count{handler="/rules"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="10000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+06"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+07"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+08"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+09"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_response_size_bytes_sum{handler="/service-discovery"} 35040 +prometheus_http_response_size_bytes_count{handler="/service-discovery"} 20 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="10000"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100000"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+06"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+07"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+08"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+09"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_response_size_bytes_sum{handler="/status"} 80592 
+prometheus_http_response_size_bytes_count{handler="/status"} 46 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="10000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+06"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+07"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+08"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+09"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_response_size_bytes_sum{handler="/targets"} 68328 +prometheus_http_response_size_bytes_count{handler="/targets"} 39 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/tsdb-status"} 85848 +prometheus_http_response_size_bytes_count{handler="/tsdb-status"} 49 +prometheus_notifications_alertmanagers_discovered 1 +prometheus_notifications_dropped_total 0 +prometheus_notifications_errors_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 0 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.5"} 0.001566044 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.9"} 0.003927931 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.99"} 0.013928135 +prometheus_notifications_latency_seconds_sum{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 194.15032606200046 +prometheus_notifications_latency_seconds_count{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 75180 +prometheus_notifications_queue_capacity 10000 +prometheus_notifications_queue_length 0 +prometheus_notifications_sent_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 141616 +prometheus_ready 1 +prometheus_remote_storage_exemplars_in_total 4.959738e+06 +prometheus_remote_storage_highest_timestamp_in_seconds 1.738949375e+09 +prometheus_remote_storage_histograms_in_total 0 +prometheus_remote_storage_samples_in_total 6.00447296e+08 +prometheus_remote_storage_string_interner_zero_reference_releases_total 0 +prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 0.000214623 +prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.001456135 +prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 0.008111814 +prometheus_rule_evaluation_duration_seconds_sum 5209.704794862625 +prometheus_rule_evaluation_duration_seconds_count 7.203456e+06 
+prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 118092 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 118090 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1.4761e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.476125e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 649495 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 649484 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.358035e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.358035e+06 +prometheus_rule_group_duration_seconds{quantile="0.01"} 0.000735928 +prometheus_rule_group_duration_seconds{quantile="0.05"} 0.000818857 +prometheus_rule_group_duration_seconds{quantile="0.5"} 0.004852081 +prometheus_rule_group_duration_seconds{quantile="0.9"} 0.022897759 +prometheus_rule_group_duration_seconds{quantile="0.99"} 0.069327797 +prometheus_rule_group_duration_seconds_sum 5335.451440133042 +prometheus_rule_group_duration_seconds_count 472359 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 15 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert 
rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 59046 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 59045 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000754015 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001104624 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.022040842 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.048928087 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.002766703 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002218466 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.010245447 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009232393 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 
+prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 1.738949373753179e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 1.738949366900781e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1.7389493632087085e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.7389493666743164e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 1.7389493675395968e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1.7389493623381927e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.738949366856423e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.738949369917499e+09 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.002032625 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 9.1853e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.000166088 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.000108127 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 6.408e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 2.621e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 8.4979e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.000104444 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000685176 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001035099 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.021769662 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.042017156 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.00260535 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002069356 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.009905481 
+prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009056893 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 25 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 25 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 23 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 23 +prometheus_sd_azure_cache_hit_total 0 +prometheus_sd_azure_failures_total 0 +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_failures_total 0 +prometheus_sd_discovered_targets{config="alertmanager",name="scrape"} 1 +prometheus_sd_discovered_targets{config="blackbox",name="scrape"} 1 +prometheus_sd_discovered_targets{config="caddy",name="scrape"} 1 +prometheus_sd_discovered_targets{config="cadvisor",name="scrape"} 1 +prometheus_sd_discovered_targets{config="config-0",name="notify"} 1 +prometheus_sd_discovered_targets{config="grafana",name="scrape"} 1 +prometheus_sd_discovered_targets{config="node",name="scrape"} 1 +prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1 +prometheus_sd_discovered_targets{config="random",name="scrape"} 4 +prometheus_sd_dns_lookup_failures_total 0 +prometheus_sd_dns_lookups_total 0 +prometheus_sd_failed_configs{name="notify"} 0 +prometheus_sd_failed_configs{name="scrape"} 0 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/alertmanager.yml"} 1.701295557e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/cadvisor.yml"} 1.705433682e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/node.yml"} 1.701295553e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/random.yml"} 1.579263729e+09 +prometheus_sd_file_read_errors_total 0 +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} 9.5384e-05 +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} 0.000275679 +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} 0.000275679 +prometheus_sd_file_scan_duration_seconds_sum 4.751347856999997 
+prometheus_sd_file_scan_duration_seconds_count 11812 +prometheus_sd_file_watcher_errors_total 0 +prometheus_sd_http_failures_total 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +prometheus_sd_kubernetes_failures_total 0 +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.5"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.9"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.99"} NaN +prometheus_sd_kuma_fetch_duration_seconds_sum 0 +prometheus_sd_kuma_fetch_duration_seconds_count 0 +prometheus_sd_kuma_fetch_failures_total 0 +prometheus_sd_kuma_fetch_skipped_updates_total 0 +prometheus_sd_linode_failures_total 0 +prometheus_sd_nomad_failures_total 0 +prometheus_sd_received_updates_total{name="notify"} 2 +prometheus_sd_received_updates_total{name="scrape"} 11820 +prometheus_sd_updates_delayed_total{name="notify"} 0 +prometheus_sd_updates_delayed_total{name="scrape"} 0 +prometheus_sd_updates_total{name="notify"} 1 +prometheus_sd_updates_total{name="scrape"} 2953 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14.982591232 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14.997567414 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 14.999977915 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15.000793403 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15.017607167 +prometheus_target_interval_length_seconds_sum{interval="15s"} 9.742237453453667e+06 +prometheus_target_interval_length_seconds_count{interval="15s"} 649482 +prometheus_target_metadata_cache_bytes{scrape_job="alertmanager"} 6817 +prometheus_target_metadata_cache_bytes{scrape_job="blackbox"} 661 +prometheus_target_metadata_cache_bytes{scrape_job="caddy"} 2365 +prometheus_target_metadata_cache_bytes{scrape_job="cadvisor"} 4547 +prometheus_target_metadata_cache_bytes{scrape_job="grafana"} 37608 +prometheus_target_metadata_cache_bytes{scrape_job="node"} 13900 +prometheus_target_metadata_cache_bytes{scrape_job="prometheus"} 20265 +prometheus_target_metadata_cache_bytes{scrape_job="random"} 792 +prometheus_target_metadata_cache_entries{scrape_job="alertmanager"} 86 +prometheus_target_metadata_cache_entries{scrape_job="blackbox"} 13 
+prometheus_target_metadata_cache_entries{scrape_job="caddy"} 47 +prometheus_target_metadata_cache_entries{scrape_job="cadvisor"} 93 +prometheus_target_metadata_cache_entries{scrape_job="grafana"} 373 +prometheus_target_metadata_cache_entries{scrape_job="node"} 293 +prometheus_target_metadata_cache_entries{scrape_job="prometheus"} 237 +prometheus_target_metadata_cache_entries{scrape_job="random"} 16 +prometheus_target_scrape_pool_exceeded_label_limits_total 0 +prometheus_target_scrape_pool_exceeded_target_limit_total 0 +prometheus_target_scrape_pool_reloads_failed_total 0 +prometheus_target_scrape_pool_reloads_total 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="node"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="random"} 0 +prometheus_target_scrape_pool_sync_total{scrape_job="alertmanager"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="blackbox"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="caddy"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="cadvisor"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="grafana"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="node"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="random"} 2953 +prometheus_target_scrape_pool_target_limit{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="node"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="random"} 0 +prometheus_target_scrape_pool_targets{scrape_job="alertmanager"} 1 +prometheus_target_scrape_pool_targets{scrape_job="blackbox"} 1 +prometheus_target_scrape_pool_targets{scrape_job="caddy"} 1 +prometheus_target_scrape_pool_targets{scrape_job="cadvisor"} 1 +prometheus_target_scrape_pool_targets{scrape_job="grafana"} 1 +prometheus_target_scrape_pool_targets{scrape_job="node"} 1 +prometheus_target_scrape_pool_targets{scrape_job="prometheus"} 1 +prometheus_target_scrape_pool_targets{scrape_job="random"} 4 +prometheus_target_scrape_pools_failed_total 0 +prometheus_target_scrape_pools_total 8 +prometheus_target_scrapes_cache_flush_forced_total 0 +prometheus_target_scrapes_exceeded_body_size_limit_total 0 +prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total 0 +prometheus_target_scrapes_exceeded_sample_limit_total 0 +prometheus_target_scrapes_exemplar_out_of_order_total 0 +prometheus_target_scrapes_sample_duplicate_timestamp_total 0 +prometheus_target_scrapes_sample_out_of_bounds_total 0 +prometheus_target_scrapes_sample_out_of_order_total 455 +prometheus_target_sync_failed_total{scrape_job="alertmanager"} 0 +prometheus_target_sync_failed_total{scrape_job="blackbox"} 0 
+prometheus_target_sync_failed_total{scrape_job="caddy"} 0 +prometheus_target_sync_failed_total{scrape_job="cadvisor"} 0 +prometheus_target_sync_failed_total{scrape_job="grafana"} 0 +prometheus_target_sync_failed_total{scrape_job="node"} 0 +prometheus_target_sync_failed_total{scrape_job="prometheus"} 0 +prometheus_target_sync_failed_total{scrape_job="random"} 0 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.01"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.05"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.5"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.9"} 0.000141485 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.99"} 0.000141485 +prometheus_target_sync_length_seconds_sum{scrape_job="alertmanager"} 0.13103036 +prometheus_target_sync_length_seconds_count{scrape_job="alertmanager"} 2953 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.01"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.05"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.5"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.9"} 6.2134e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.99"} 6.2134e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="blackbox"} 0.6044201539999996 +prometheus_target_sync_length_seconds_count{scrape_job="blackbox"} 2953 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.01"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.05"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.5"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.9"} 7.8256e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.99"} 7.8256e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="caddy"} 0.10369844599999971 +prometheus_target_sync_length_seconds_count{scrape_job="caddy"} 2953 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.01"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.05"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.5"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.9"} 4.2337e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.99"} 4.2337e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="cadvisor"} 0.10489659999999998 +prometheus_target_sync_length_seconds_count{scrape_job="cadvisor"} 2953 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.01"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.05"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.5"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.9"} 1.7284e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.99"} 1.7284e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="grafana"} 0.09031192700000017 +prometheus_target_sync_length_seconds_count{scrape_job="grafana"} 2953 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.01"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.05"} 4.1607e-05 
+prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.5"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.9"} 7.416e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.99"} 7.416e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="node"} 0.11539821299999993 +prometheus_target_sync_length_seconds_count{scrape_job="node"} 2953 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 1.961e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 1.961e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.10655758600000016 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 2953 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.01"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.05"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.5"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.9"} 4.8586e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.99"} 4.8586e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="random"} 0.20406449899999993 +prometheus_target_sync_length_seconds_count{scrape_job="random"} 2953 +prometheus_template_text_expansion_failures_total 0 +prometheus_template_text_expansions_total 2.126277e+06 +prometheus_treecache_watcher_goroutines 0 +prometheus_treecache_zookeeper_failures_total 0 +prometheus_tsdb_blocks_loaded 17 +prometheus_tsdb_checkpoint_creations_failed_total 0 +prometheus_tsdb_checkpoint_creations_total 62 +prometheus_tsdb_checkpoint_deletions_failed_total 0 +prometheus_tsdb_checkpoint_deletions_total 62 +prometheus_tsdb_clean_start 1 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 676 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 1555 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 2379 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 34068 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 4.819758e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_sum 8.917524773366e+12 +prometheus_tsdb_compaction_chunk_range_seconds_count 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 1407 +prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 1544 +prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 1881 +prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 2065 +prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 2782 +prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 4342 +prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 6180 +prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 11518 
+prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 13890 +prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 4.810155e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 4.86058e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_sum 5.86000708e+08 +prometheus_tsdb_compaction_chunk_samples_count 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 1233 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 156238 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2.006456e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3.568405e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 3.835144e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 4.034591e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 4.505646e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 4.69694e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 4.78551e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_sum 6.81852972e+08 +prometheus_tsdb_compaction_chunk_size_bytes_count 4.861956e+06 +prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 61 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 155 +prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 180 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="1024"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2048"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="4096"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8192"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 183 +prometheus_tsdb_compaction_duration_seconds_sum 254.9095696899999 +prometheus_tsdb_compaction_duration_seconds_count 183 +prometheus_tsdb_compaction_populating_block 0 +prometheus_tsdb_compactions_failed_total 0 +prometheus_tsdb_compactions_skipped_total 0 +prometheus_tsdb_compactions_total 183 +prometheus_tsdb_compactions_triggered_total 14855 +prometheus_tsdb_data_replay_duration_seconds 5.550163617 +prometheus_tsdb_exemplar_exemplars_appended_total 0 +prometheus_tsdb_exemplar_exemplars_in_storage 0 +prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds 0 +prometheus_tsdb_exemplar_max_exemplars 0 +prometheus_tsdb_exemplar_out_of_order_exemplars_total 0 +prometheus_tsdb_exemplar_series_with_exemplars_in_storage 0 +prometheus_tsdb_head_active_appenders 0 +prometheus_tsdb_head_chunks 31476 +prometheus_tsdb_head_chunks_created_total 4.893432e+06 +prometheus_tsdb_head_chunks_removed_total 4.861956e+06 
+prometheus_tsdb_head_chunks_storage_size_bytes 7.237299e+06 +prometheus_tsdb_head_gc_duration_seconds_sum 4.773801686000001 +prometheus_tsdb_head_gc_duration_seconds_count 123 +prometheus_tsdb_head_max_time 1.738949375191e+12 +prometheus_tsdb_head_max_time_seconds 1.738949375e+09 +prometheus_tsdb_head_min_time 1.738944000171e+12 +prometheus_tsdb_head_min_time_seconds 1.738944e+09 +prometheus_tsdb_head_out_of_order_samples_appended_total{type="float"} 0 +prometheus_tsdb_head_out_of_order_samples_appended_total{type="histogram"} 0 +prometheus_tsdb_head_samples_appended_total{type="float"} 5.85543187e+08 +prometheus_tsdb_head_samples_appended_total{type="histogram"} 0 +prometheus_tsdb_head_series 10720 +prometheus_tsdb_head_series_created_total 18541 +prometheus_tsdb_head_series_not_found_total 0 +prometheus_tsdb_head_series_removed_total 7821 +prometheus_tsdb_head_truncations_failed_total 0 +prometheus_tsdb_head_truncations_total 123 +prometheus_tsdb_isolation_high_watermark 7.852949e+06 +prometheus_tsdb_isolation_low_watermark 7.852949e+06 +prometheus_tsdb_lowest_timestamp 1.73618640004e+12 +prometheus_tsdb_lowest_timestamp_seconds 1.7361864e+09 +prometheus_tsdb_mmap_chunk_corruptions_total 0 +prometheus_tsdb_mmap_chunks_total 4.851264e+06 +prometheus_tsdb_out_of_bound_samples_total{type="float"} 0 +prometheus_tsdb_out_of_order_samples_total{type="float"} 517 +prometheus_tsdb_out_of_order_samples_total{type="histogram"} 0 +prometheus_tsdb_reloads_failures_total 0 +prometheus_tsdb_reloads_total 14822 +prometheus_tsdb_retention_limit_bytes 0 +prometheus_tsdb_retention_limit_seconds 2.6784e+06 +prometheus_tsdb_size_retentions_total 0 +prometheus_tsdb_snapshot_replay_error_total 0 +prometheus_tsdb_storage_blocks_bytes 2.762863592e+09 +prometheus_tsdb_symbol_table_size_bytes 10616 +prometheus_tsdb_time_retentions_total 5 +prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0 +prometheus_tsdb_tombstone_cleanup_seconds_sum 0 +prometheus_tsdb_tombstone_cleanup_seconds_count 0 +prometheus_tsdb_too_old_samples_total{type="float"} 0 +prometheus_tsdb_vertical_compactions_total 0 +prometheus_tsdb_wal_completed_pages_total 109271 +prometheus_tsdb_wal_corruptions_total 0 +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} NaN +prometheus_tsdb_wal_fsync_duration_seconds_sum 4.842524568000002 +prometheus_tsdb_wal_fsync_duration_seconds_count 123 +prometheus_tsdb_wal_page_flushes_total 2.293951e+06 +prometheus_tsdb_wal_segment_current 24726 +prometheus_tsdb_wal_storage_size_bytes 6.9168385e+07 +prometheus_tsdb_wal_truncate_duration_seconds_sum 121.61954577099996 +prometheus_tsdb_wal_truncate_duration_seconds_count 62 +prometheus_tsdb_wal_truncations_failed_total 0 +prometheus_tsdb_wal_truncations_total 62 +prometheus_tsdb_wal_writes_failed_total 0 +prometheus_web_federation_errors_total 0 +prometheus_web_federation_warnings_total 0 +promhttp_metric_handler_requests_in_flight 1 +promhttp_metric_handler_requests_total{code="200"} 4.059092e+06 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# EOF diff --git a/model/textparse/testdata/alltypes.237mfs.prom.txt b/model/textparse/testdata/alltypes.237mfs.prom.txt new file mode 100644 index 0000000000..01138055cf --- /dev/null +++ b/model/textparse/testdata/alltypes.237mfs.prom.txt @@ -0,0 +1,2332 @@ +# HELP go_gc_cycles_automatic_gc_cycles_total Count of 
completed GC cycles generated by the Go runtime. Sourced from /gc/cycles/automatic:gc-cycles +# TYPE go_gc_cycles_automatic_gc_cycles_total counter +go_gc_cycles_automatic_gc_cycles_total 190932 +# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. Sourced from /gc/cycles/forced:gc-cycles +# TYPE go_gc_cycles_forced_gc_cycles_total counter +go_gc_cycles_forced_gc_cycles_total 0 +# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. Sourced from /gc/cycles/total:gc-cycles +# TYPE go_gc_cycles_total_gc_cycles_total counter +go_gc_cycles_total_gc_cycles_total 190932 +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 7.6238e-05 +go_gc_duration_seconds{quantile="0.25"} 0.00010188 +go_gc_duration_seconds{quantile="0.5"} 0.000135819 +go_gc_duration_seconds{quantile="0.75"} 0.000156061 +go_gc_duration_seconds{quantile="1"} 0.002389378 +go_gc_duration_seconds_sum 44.985611511 +go_gc_duration_seconds_count 190932 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 75 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.03676723e+08 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/allocs-by-size:bytes +# TYPE go_gc_heap_allocs_by_size_bytes histogram +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 2.279966416e+09 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 1.9429106442e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 3.2158220609e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 4.2309744198e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 4.3418919481e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 4.374631622e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 4.3917578245e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 4.396605609e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 4.4007501305e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 4.4020325917e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 4.4036187548e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 4.4046288803e+10 +go_gc_heap_allocs_by_size_bytes_sum 5.937322571024e+12 +go_gc_heap_allocs_by_size_bytes_count 4.4046288803e+10 +# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. Sourced from /gc/heap/allocs:bytes +# TYPE go_gc_heap_allocs_bytes_total counter +go_gc_heap_allocs_bytes_total 5.937322571024e+12 +# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/allocs:objects +# TYPE go_gc_heap_allocs_objects_total counter +go_gc_heap_allocs_objects_total 4.4046288803e+10 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/frees-by-size:bytes +# TYPE go_gc_heap_frees_by_size_bytes histogram +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 2.279951045e+09 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 1.9428965693e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 3.2157849717e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 4.2309204178e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 4.3418348856e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 4.3745739652e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 4.3916999773e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 4.3965477112e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 4.4006921621e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 4.4019746017e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 4.4035607466e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 4.4045708586e+10 +go_gc_heap_frees_by_size_bytes_sum 5.937239047736e+12 +go_gc_heap_frees_by_size_bytes_count 4.4045708586e+10 +# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. Sourced from /gc/heap/frees:bytes +# TYPE go_gc_heap_frees_bytes_total counter +go_gc_heap_frees_bytes_total 5.937239047736e+12 +# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/frees:objects +# TYPE go_gc_heap_frees_objects_total counter +go_gc_heap_frees_objects_total 4.4045708586e+10 +# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. Sourced from /gc/heap/goal:bytes +# TYPE go_gc_heap_goal_bytes gauge +go_gc_heap_goal_bytes 1.0365948e+08 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. Sourced from /gc/heap/live:bytes +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 5.8791024e+07 +# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. Sourced from /gc/heap/objects:objects +# TYPE go_gc_heap_objects_objects gauge +go_gc_heap_objects_objects 580217 +# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. Sourced from /gc/heap/tiny/allocs:objects +# TYPE go_gc_heap_tiny_allocs_objects_total counter +go_gc_heap_tiny_allocs_objects_total 8.306064403e+09 +# HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. 
This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. Sourced from /gc/limiter/last-enabled:gc-cycle +# TYPE go_gc_limiter_last_enabled_gc_cycle gauge +go_gc_limiter_last_enabled_gc_cycle 160896 +# HELP go_gc_pauses_seconds Deprecated. Prefer the identical /sched/pauses/total/gc:seconds. Sourced from /gc/pauses:seconds +# TYPE go_gc_pauses_seconds histogram +go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 137212 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 208425 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 376121 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 381798 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 381863 +go_gc_pauses_seconds_bucket{le="+Inf"} 381864 +go_gc_pauses_seconds_sum 20.343611904000003 +go_gc_pauses_seconds_count 381864 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. Sourced from /gc/scan/globals:bytes +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 555824 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. Sourced from /gc/scan/heap:bytes +# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 4.0986192e+07 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. Sourced from /gc/scan/stack:bytes +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 477760 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. Sourced from /gc/scan/total:bytes +# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 4.2019776e+07 +# HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes +# TYPE go_gc_stack_starting_size_bytes gauge +go_gc_stack_starting_size_bytes 4096 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 151 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.23.4"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 8.3523288e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 5.937322571024e+12 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 9.35076e+06 +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 5.2351772989e+10 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. 
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 5.450544e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 8.3523288e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 1.23691008e+08
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 9.56416e+07
+# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 580217
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 1.05897984e+08
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 2.19332608e+08
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.738949372347471e+09
+# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 5.2352353206e+10
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 1200
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 15600
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 1.21232e+06
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 2.13792e+06
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 1.0365948e+08
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 803696
+# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 2.818048e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 2.818048e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 2.39909176e+08
+# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads
+# TYPE go_sched_gomaxprocs_threads gauge
+go_sched_gomaxprocs_threads 1
+# HELP go_sched_goroutines_goroutines Count of live goroutines. Sourced from /sched/goroutines:goroutines
+# TYPE go_sched_goroutines_goroutines gauge
+go_sched_goroutines_goroutines 151
+# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. Sourced from /sched/latencies:seconds
+# TYPE go_sched_latencies_seconds histogram
+go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 5.0101035e+07
+go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 7.4291762e+07
+go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 8.1386236e+07
+go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 8.2116161e+07
+go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 8.2802009e+07
+go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 8.6274195e+07
+go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 8.6724074e+07
+go_sched_latencies_seconds_bucket{le="+Inf"} 8.6726596e+07
+go_sched_latencies_seconds_sum 8266.758178496
+go_sched_latencies_seconds_count 8.6726596e+07
+# HELP go_sched_pauses_stopping_gc_seconds Distribution of individual GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total GC-related stop-the-world time (/sched/pauses/total/gc:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. Sourced from /sched/pauses/stopping/gc:seconds
+# TYPE go_sched_pauses_stopping_gc_seconds histogram
+go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-08"} 0
+go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-07"} 236272
+go_sched_pauses_stopping_gc_seconds_bucket{le="7.167999999999999e-06"} 380799
+go_sched_pauses_stopping_gc_seconds_bucket{le="8.191999999999999e-05"} 381741
+go_sched_pauses_stopping_gc_seconds_bucket{le="0.0009175039999999999"} 381807
+go_sched_pauses_stopping_gc_seconds_bucket{le="0.010485759999999998"} 381862
+go_sched_pauses_stopping_gc_seconds_bucket{le="0.11744051199999998"} 381864
+go_sched_pauses_stopping_gc_seconds_bucket{le="+Inf"} 381864
+go_sched_pauses_stopping_gc_seconds_sum 0.191211904
+go_sched_pauses_stopping_gc_seconds_count 381864
+# HELP go_sched_pauses_stopping_other_seconds Distribution of individual non-GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total non-GC-related stop-the-world time (/sched/pauses/total/other:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. Sourced from /sched/pauses/stopping/other:seconds
+# TYPE go_sched_pauses_stopping_other_seconds histogram
+go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-08"} 0
+go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-07"} 0
+go_sched_pauses_stopping_other_seconds_bucket{le="7.167999999999999e-06"} 0
+go_sched_pauses_stopping_other_seconds_bucket{le="8.191999999999999e-05"} 0
+go_sched_pauses_stopping_other_seconds_bucket{le="0.0009175039999999999"} 0
+go_sched_pauses_stopping_other_seconds_bucket{le="0.010485759999999998"} 0
+go_sched_pauses_stopping_other_seconds_bucket{le="0.11744051199999998"} 0
+go_sched_pauses_stopping_other_seconds_bucket{le="+Inf"} 0
+go_sched_pauses_stopping_other_seconds_sum 0
+go_sched_pauses_stopping_other_seconds_count 0
+# HELP go_sched_pauses_total_gc_seconds Distribution of individual GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (this is measured directly in /sched/pauses/stopping/gc:seconds), during which some threads may still be running. Bucket counts increase monotonically. Sourced from /sched/pauses/total/gc:seconds
+# TYPE go_sched_pauses_total_gc_seconds histogram
+go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-08"} 0
+go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-07"} 0
+go_sched_pauses_total_gc_seconds_bucket{le="7.167999999999999e-06"} 137212
+go_sched_pauses_total_gc_seconds_bucket{le="8.191999999999999e-05"} 208425
+go_sched_pauses_total_gc_seconds_bucket{le="0.0009175039999999999"} 376121
+go_sched_pauses_total_gc_seconds_bucket{le="0.010485759999999998"} 381798
+go_sched_pauses_total_gc_seconds_bucket{le="0.11744051199999998"} 381863
+go_sched_pauses_total_gc_seconds_bucket{le="+Inf"} 381864
+go_sched_pauses_total_gc_seconds_sum 20.343611904000003
+go_sched_pauses_total_gc_seconds_count 381864
+# HELP go_sched_pauses_total_other_seconds Distribution of individual non-GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (measured directly in /sched/pauses/stopping/other:seconds). Bucket counts increase monotonically. Sourced from /sched/pauses/total/other:seconds
+# TYPE go_sched_pauses_total_other_seconds histogram
+go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-08"} 0
+go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-07"} 0
+go_sched_pauses_total_other_seconds_bucket{le="7.167999999999999e-06"} 0
+go_sched_pauses_total_other_seconds_bucket{le="8.191999999999999e-05"} 0
+go_sched_pauses_total_other_seconds_bucket{le="0.0009175039999999999"} 0
+go_sched_pauses_total_other_seconds_bucket{le="0.010485759999999998"} 0
+go_sched_pauses_total_other_seconds_bucket{le="0.11744051199999998"} 0
+go_sched_pauses_total_other_seconds_bucket{le="+Inf"} 0
+go_sched_pauses_total_other_seconds_sum 0
+go_sched_pauses_total_other_seconds_count 0
+# HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. Sourced from /sync/mutex/wait/total:seconds
+# TYPE go_sync_mutex_wait_total_seconds_total counter
+go_sync_mutex_wait_total_seconds_total 628.29966272
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 10
+# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
+# TYPE net_conntrack_dialer_conn_attempted_total counter
+net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 2
+net_conntrack_dialer_conn_attempted_total{dialer_name="blackbox"} 1
+net_conntrack_dialer_conn_attempted_total{dialer_name="caddy"} 1
+net_conntrack_dialer_conn_attempted_total{dialer_name="cadvisor"} 2
+net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0
+net_conntrack_dialer_conn_attempted_total{dialer_name="grafana"} 7
+net_conntrack_dialer_conn_attempted_total{dialer_name="node"} 5
+net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 1
+net_conntrack_dialer_conn_attempted_total{dialer_name="random"} 4
+# HELP net_conntrack_dialer_conn_closed_total Total number of connections closed which originated from the dialer of a given name.
+# TYPE net_conntrack_dialer_conn_closed_total counter
+net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 0
+net_conntrack_dialer_conn_closed_total{dialer_name="blackbox"} 0
+net_conntrack_dialer_conn_closed_total{dialer_name="caddy"} 0
+net_conntrack_dialer_conn_closed_total{dialer_name="cadvisor"} 1
+net_conntrack_dialer_conn_closed_total{dialer_name="default"} 0
+net_conntrack_dialer_conn_closed_total{dialer_name="grafana"} 4
+net_conntrack_dialer_conn_closed_total{dialer_name="node"} 4
+net_conntrack_dialer_conn_closed_total{dialer_name="prometheus"} 0
+net_conntrack_dialer_conn_closed_total{dialer_name="random"} 0
+# HELP net_conntrack_dialer_conn_established_total Total number of connections successfully established by the given dialer a given name.
+# TYPE net_conntrack_dialer_conn_established_total counter
+net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 2
+net_conntrack_dialer_conn_established_total{dialer_name="blackbox"} 1
+net_conntrack_dialer_conn_established_total{dialer_name="caddy"} 1
+net_conntrack_dialer_conn_established_total{dialer_name="cadvisor"} 2
+net_conntrack_dialer_conn_established_total{dialer_name="default"} 0
+net_conntrack_dialer_conn_established_total{dialer_name="grafana"} 5
+net_conntrack_dialer_conn_established_total{dialer_name="node"} 5
+net_conntrack_dialer_conn_established_total{dialer_name="prometheus"} 1
+net_conntrack_dialer_conn_established_total{dialer_name="random"} 4
+# HELP net_conntrack_dialer_conn_failed_total Total number of connections failed to dial by the dialer a given name.
+# TYPE net_conntrack_dialer_conn_failed_total counter
+net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="unknown"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="unknown"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="unknown"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="unknown"} 2
+net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="unknown"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="unknown"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="refused"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="resolution"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="timeout"} 0
+net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="unknown"} 0
+# HELP net_conntrack_listener_conn_accepted_total Total number of connections opened to the listener of a given name.
+# TYPE net_conntrack_listener_conn_accepted_total counter
+net_conntrack_listener_conn_accepted_total{listener_name="http"} 561771
+# HELP net_conntrack_listener_conn_closed_total Total number of connections closed that were made to the listener of a given name.
+# TYPE net_conntrack_listener_conn_closed_total counter
+net_conntrack_listener_conn_closed_total{listener_name="http"} 561723
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 96501.42
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 65000
+# HELP process_network_receive_bytes_total Number of bytes received by the process over the network.
+# TYPE process_network_receive_bytes_total counter
+process_network_receive_bytes_total 9.89686846606e+11
+# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network.
+# TYPE process_network_transmit_bytes_total counter
+process_network_transmit_bytes_total 3.743928187168e+12
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 124
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 1.72822528e+08
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.73806369023e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 4.608667648e+09
+# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
+# TYPE process_virtual_memory_max_bytes gauge
+process_virtual_memory_max_bytes 1.8446744073709552e+19
+# HELP prometheus_api_notification_active_subscribers The current number of active notification subscribers.
+# TYPE prometheus_api_notification_active_subscribers gauge
+prometheus_api_notification_active_subscribers 2
+# HELP prometheus_api_notification_updates_dropped_total Total number of notification updates dropped.
+# TYPE prometheus_api_notification_updates_dropped_total counter
+prometheus_api_notification_updates_dropped_total 0
+# HELP prometheus_api_notification_updates_sent_total Total number of notification updates sent.
+# TYPE prometheus_api_notification_updates_sent_total counter
+prometheus_api_notification_updates_sent_total 5
+# HELP prometheus_api_remote_read_queries The current number of remote read queries being executed or waiting.
+# TYPE prometheus_api_remote_read_queries gauge
+prometheus_api_remote_read_queries 0
+# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which prometheus was built, and the goos and goarch for the build.
+# TYPE prometheus_build_info gauge
+prometheus_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.23.4",revision="7086161a93b262aa0949dbf2aba15a5a7b13e0a3",tags="netgo,builtinassets,stringlabels",version="3.1.0"} 1
+# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
+# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
+prometheus_config_last_reload_success_timestamp_seconds 1.7380636976181264e+09
+# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
+# TYPE prometheus_config_last_reload_successful gauge
+prometheus_config_last_reload_successful 1
+# HELP prometheus_engine_queries The current number of queries being executed or waiting.
+# TYPE prometheus_engine_queries gauge
+prometheus_engine_queries 0
+# HELP prometheus_engine_queries_concurrent_max The max number of concurrent queries.
+# TYPE prometheus_engine_queries_concurrent_max gauge
+prometheus_engine_queries_concurrent_max 20
+# HELP prometheus_engine_query_duration_seconds Query timings
+# TYPE prometheus_engine_query_duration_seconds summary
+prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 8.075e-05
+prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 0.000917449
+prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 0.009315769
+prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 12506.67007997419
+prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 8.714071e+06
+prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 2.228e-05
+prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 6.2819e-05
+prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 0.000399637
+prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 490.326247638047
+prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 8.714071e+06
+prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 4.628e-06
+prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 1.6082e-05
+prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 4.1174e-05
+prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 8720.662071224393
+prometheus_engine_query_duration_seconds_count{slice="queue_time"} 1.7428526e+07
+prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} 7.83e-07
+prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} 1.994e-06
+prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} 1.458e-05
+prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 2.337879348999977
+prometheus_engine_query_duration_seconds_count{slice="result_sort"} 1.208154e+06
+# HELP prometheus_engine_query_log_enabled State of the query log.
+# TYPE prometheus_engine_query_log_enabled gauge
+prometheus_engine_query_log_enabled 0
+# HELP prometheus_engine_query_log_failures_total The number of query log failures.
+# TYPE prometheus_engine_query_log_failures_total counter
+prometheus_engine_query_log_failures_total 0
+# HELP prometheus_engine_query_samples_total The total number of samples loaded by all queries.
+# TYPE prometheus_engine_query_samples_total counter
+prometheus_engine_query_samples_total 9.1183747239e+10
+# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
+# TYPE prometheus_http_request_duration_seconds histogram
+prometheus_http_request_duration_seconds_bucket{handler="/",le="0.1"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="0.2"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="0.4"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="3"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="8"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="20"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="60"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="120"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 688
+prometheus_http_request_duration_seconds_sum{handler="/"} 0.026826932000000022
+prometheus_http_request_duration_seconds_count{handler="/"} 688
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.1"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.2"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.4"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="1"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="3"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="8"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="20"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="60"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="120"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="+Inf"} 29524
+prometheus_http_request_duration_seconds_sum{handler="/-/healthy"} 0.7400570460000002
+prometheus_http_request_duration_seconds_count{handler="/-/healthy"} 29524
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.1"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.2"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.4"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="1"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="3"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="8"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="20"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="60"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="120"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="+Inf"} 49
+prometheus_http_request_duration_seconds_sum{handler="/-/ready"} 0.005040123000000002
+prometheus_http_request_duration_seconds_count{handler="/-/ready"} 49
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.1"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.2"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.4"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="1"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="3"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="8"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="20"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="60"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="120"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="+Inf"} 48
+prometheus_http_request_duration_seconds_sum{handler="/alerts"} 0.011801602999999999
+prometheus_http_request_duration_seconds_count{handler="/alerts"} 48
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.1"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.2"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.4"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="1"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="3"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="8"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="20"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="60"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="120"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="+Inf"} 27
+prometheus_http_request_duration_seconds_sum{handler="/api/v1/*path"} 0.001724389
+prometheus_http_request_duration_seconds_count{handler="/api/v1/*path"} 27
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.1"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.2"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.4"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="1"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="3"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="8"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="20"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="60"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="120"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8
+prometheus_http_request_duration_seconds_sum{handler="/api/v1/alertmanagers"} 0.042492975999999995
+prometheus_http_request_duration_seconds_count{handler="/api/v1/alertmanagers"} 8
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.1"} 14630
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.2"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.4"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="1"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="3"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="8"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="20"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="60"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="120"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="+Inf"} 14635
+prometheus_http_request_duration_seconds_sum{handler="/api/v1/alerts"} 19.028669391999912
+prometheus_http_request_duration_seconds_count{handler="/api/v1/alerts"} 14635
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/format_query"} 0.023786675 +prometheus_http_request_duration_seconds_count{handler="/api/v1/format_query"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 17773 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 17860 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 17939 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 17970 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 99.95326355699925 +prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.1"} 4905 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.2"} 4912 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.4"} 4916 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="1"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="3"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="8"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="20"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="60"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="120"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/labels"} 12.963155722000025 +prometheus_http_request_duration_seconds_count{handler="/api/v1/labels"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.1"} 2073 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.2"} 2091 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.4"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="1"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="3"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="8"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="20"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="60"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="120"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/metadata"} 16.90794258600001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/metadata"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.1"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.2"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.4"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="1"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="3"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="8"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="20"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="60"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="120"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications"} 0.029363403 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.1"} 3 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.4"} 11 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="1"} 29 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="3"} 79 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="8"} 116 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="20"} 187 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="60"} 224 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="120"} 234 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="+Inf"} 263 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications/live"} 85150.78366682903 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications/live"} 263 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.1"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.2"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.4"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="1"} 265 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="3"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="8"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="20"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="60"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="120"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/parse_query"} 0.7265021480000003 +prometheus_http_request_duration_seconds_count{handler="/api/v1/parse_query"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 3051 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 3082 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 3173 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 3213 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 3216 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 3217 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 70.23571750499987 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.1"} 831 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.2"} 839 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.4"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="1"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="3"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="8"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="20"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="60"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="120"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_exemplars"} 3.6569132899999968 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 116338 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 116784 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 117924 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 118147 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 118155 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 118157 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 118157 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 832.1240439999854 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.2"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.4"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="3"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="8"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="20"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="60"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="120"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/read"} 0.07308876400000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/read"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.1"} 88415 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.2"} 88627 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.4"} 88659 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="1"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="3"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="8"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="20"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="60"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="120"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/rules"} 629.5237672670012 +prometheus_http_request_duration_seconds_count{handler="/api/v1/rules"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.2"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.4"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="3"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="8"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="20"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="60"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="120"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 
142 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/scrape_pools"} 0.17501777799999996 +prometheus_http_request_duration_seconds_count{handler="/api/v1/scrape_pools"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.1"} 1217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.2"} 1236 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.4"} 1244 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="1"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="3"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="8"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="20"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="60"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="120"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/series"} 19.03228310300001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/series"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.1"} 4412 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.2"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.4"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="1"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="3"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="8"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="20"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="60"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="120"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/buildinfo"} 5.222974727000008 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.1"} 84669 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.2"} 84716 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.4"} 84721 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="1"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="3"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="8"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="20"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="60"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="120"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/config"} 228.93575751199964 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/config"} 
84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.1"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.2"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.4"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="1"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="3"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="8"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="20"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="60"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="120"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/flags"} 0.093302815 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/flags"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.1"} 862 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.2"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.4"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="1"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="3"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="8"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="20"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="60"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="120"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/runtimeinfo"} 8.009312817000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.2"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.4"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="3"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="8"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="20"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="60"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="120"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/tsdb"} 1.3378732679999994 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/tsdb"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.1"} 49 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/walreplay"} 0.034643267 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.1"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.2"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.4"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="1"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="3"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="8"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="20"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="60"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="120"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets"} 0.5432347889999999 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.2"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.4"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="3"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="8"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="20"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="60"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="120"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets/metadata"} 0.043789973 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.1"} 385 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.2"} 481 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.4"} 528 
+prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="1"} 612 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="3"} 634 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="8"} 640 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="20"} 641 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="60"} 642 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="120"} 643 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="+Inf"} 643 +prometheus_http_request_duration_seconds_sum{handler="/assets/*filepath"} 280.0660388799997 +prometheus_http_request_duration_seconds_count{handler="/assets/*filepath"} 643 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.2"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.4"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="3"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="8"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="20"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="60"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="120"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 +prometheus_http_request_duration_seconds_sum{handler="/classic/static/*filepath"} 0.006113046000000001 +prometheus_http_request_duration_seconds_count{handler="/classic/static/*filepath"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.2"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.4"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="3"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="8"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="20"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="60"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="120"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="+Inf"} 13 +prometheus_http_request_duration_seconds_sum{handler="/config"} 0.0024490289999999997 +prometheus_http_request_duration_seconds_count{handler="/config"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.1"} 33 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.2"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.4"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="1"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="3"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="8"} 34 
+prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="20"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="60"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="120"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="+Inf"} 34 +prometheus_http_request_duration_seconds_sum{handler="/consoles/*filepath"} 0.5689515199999999 +prometheus_http_request_duration_seconds_count{handler="/consoles/*filepath"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/debug/*subpath"} 0.086499352 +prometheus_http_request_duration_seconds_count{handler="/debug/*subpath"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.2"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.4"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="3"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="8"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="20"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="60"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="120"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_request_duration_seconds_sum{handler="/favicon.ico"} 0.05591882500000002 +prometheus_http_request_duration_seconds_count{handler="/favicon.ico"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.2"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.4"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="3"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="8"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="20"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="60"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="120"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_request_duration_seconds_sum{handler="/favicon.svg"} 0.3058455699999999 
+prometheus_http_request_duration_seconds_count{handler="/favicon.svg"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/federate"} 0.05996980699999999 +prometheus_http_request_duration_seconds_count{handler="/federate"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.2"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.4"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="3"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="8"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="20"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="60"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="120"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_request_duration_seconds_sum{handler="/flags"} 0.001586781 +prometheus_http_request_duration_seconds_count{handler="/flags"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.2"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.4"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="3"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="8"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="20"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="60"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="120"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_request_duration_seconds_sum{handler="/graph"} 0.032583203000000005 +prometheus_http_request_duration_seconds_count{handler="/graph"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 3.988654e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 4.052461e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 4.057771e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 4.058809e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 4.059071e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 4.059092e+06 
+prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 +prometheus_http_request_duration_seconds_sum{handler="/metrics"} 107584.12561354278 +prometheus_http_request_duration_seconds_count{handler="/metrics"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.2"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.4"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="3"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="8"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="20"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="60"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="120"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_request_duration_seconds_sum{handler="/query"} 0.5220201759999993 +prometheus_http_request_duration_seconds_count{handler="/query"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.1"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.2"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.4"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="1"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="3"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="8"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="20"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="60"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="120"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_request_duration_seconds_sum{handler="/rules"} 2.776021043000005 +prometheus_http_request_duration_seconds_count{handler="/rules"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.2"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.4"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="3"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="8"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="20"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="60"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="120"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_request_duration_seconds_sum{handler="/service-discovery"} 0.004057062 +prometheus_http_request_duration_seconds_count{handler="/service-discovery"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.1"} 46 
+prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.2"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.4"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="1"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="3"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="8"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="20"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="60"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="120"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_request_duration_seconds_sum{handler="/status"} 0.010107473 +prometheus_http_request_duration_seconds_count{handler="/status"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.2"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.4"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="3"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="8"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="20"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="60"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="120"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_request_duration_seconds_sum{handler="/targets"} 0.009001180000000001 +prometheus_http_request_duration_seconds_count{handler="/targets"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/tsdb-status"} 0.018204165 +prometheus_http_request_duration_seconds_count{handler="/tsdb-status"} 49 +# HELP prometheus_http_requests_total Counter of HTTP requests. 
+# TYPE prometheus_http_requests_total counter +prometheus_http_requests_total{code="200",handler="/"} 0 +prometheus_http_requests_total{code="200",handler="/-/healthy"} 29524 +prometheus_http_requests_total{code="200",handler="/-/quit"} 0 +prometheus_http_requests_total{code="200",handler="/-/ready"} 49 +prometheus_http_requests_total{code="200",handler="/-/reload"} 0 +prometheus_http_requests_total{code="200",handler="/alertmanager-discovery"} 0 +prometheus_http_requests_total{code="200",handler="/alerts"} 48 +prometheus_http_requests_total{code="200",handler="/api/v1/*path"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/clean_tombstones"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/delete_series"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/snapshot"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/alertmanagers"} 8 +prometheus_http_requests_total{code="200",handler="/api/v1/alerts"} 14635 +prometheus_http_requests_total{code="200",handler="/api/v1/format_query"} 3 +prometheus_http_requests_total{code="200",handler="/api/v1/label/:name/values"} 14679 +prometheus_http_requests_total{code="200",handler="/api/v1/labels"} 4915 +prometheus_http_requests_total{code="200",handler="/api/v1/metadata"} 2093 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications"} 12 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications/live"} 4529 +prometheus_http_requests_total{code="200",handler="/api/v1/otlp/v1/metrics"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/parse_query"} 247 +prometheus_http_requests_total{code="200",handler="/api/v1/query"} 326411 +prometheus_http_requests_total{code="200",handler="/api/v1/query_exemplars"} 841 +prometheus_http_requests_total{code="200",handler="/api/v1/query_range"} 1.183734e+06 +prometheus_http_requests_total{code="200",handler="/api/v1/read"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/rules"} 88658 +prometheus_http_requests_total{code="200",handler="/api/v1/scrape_pools"} 142 +prometheus_http_requests_total{code="200",handler="/api/v1/series"} 1235 +prometheus_http_requests_total{code="200",handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_requests_total{code="200",handler="/api/v1/status/config"} 84722 +prometheus_http_requests_total{code="200",handler="/api/v1/status/flags"} 32 +prometheus_http_requests_total{code="200",handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_requests_total{code="200",handler="/api/v1/status/tsdb"} 94 +prometheus_http_requests_total{code="200",handler="/api/v1/status/walreplay"} 49 +prometheus_http_requests_total{code="200",handler="/api/v1/targets"} 191 +prometheus_http_requests_total{code="200",handler="/api/v1/targets/metadata"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/write"} 0 +prometheus_http_requests_total{code="200",handler="/assets/*filepath"} 2213 +prometheus_http_requests_total{code="200",handler="/classic/static/*filepath"} 0 +prometheus_http_requests_total{code="200",handler="/config"} 13 +prometheus_http_requests_total{code="200",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="200",handler="/debug/*subpath"} 3 +prometheus_http_requests_total{code="200",handler="/favicon.ico"} 0 +prometheus_http_requests_total{code="200",handler="/favicon.svg"} 769 +prometheus_http_requests_total{code="200",handler="/federate"} 4 +prometheus_http_requests_total{code="200",handler="/flags"} 9 
+prometheus_http_requests_total{code="200",handler="/graph"} 0 +prometheus_http_requests_total{code="200",handler="/manifest.json"} 0 +prometheus_http_requests_total{code="200",handler="/metrics"} 4.059092e+06 +prometheus_http_requests_total{code="200",handler="/query"} 1774 +prometheus_http_requests_total{code="200",handler="/rules"} 8673 +prometheus_http_requests_total{code="200",handler="/service-discovery"} 20 +prometheus_http_requests_total{code="200",handler="/status"} 46 +prometheus_http_requests_total{code="200",handler="/targets"} 39 +prometheus_http_requests_total{code="200",handler="/tsdb-status"} 49 +prometheus_http_requests_total{code="200",handler="/version"} 0 +prometheus_http_requests_total{code="204",handler="/api/v1/*path"} 27 +prometheus_http_requests_total{code="204",handler="/api/v1/notifications/live"} 5 +prometheus_http_requests_total{code="301",handler="/debug/*subpath"} 1 +prometheus_http_requests_total{code="302",handler="/"} 688 +prometheus_http_requests_total{code="302",handler="/graph"} 751 +prometheus_http_requests_total{code="400",handler="/api/v1/format_query"} 1 +prometheus_http_requests_total{code="400",handler="/api/v1/label/:name/values"} 3292 +prometheus_http_requests_total{code="400",handler="/api/v1/labels"} 6 +prometheus_http_requests_total{code="400",handler="/api/v1/parse_query"} 18 +prometheus_http_requests_total{code="400",handler="/api/v1/query"} 155 +prometheus_http_requests_total{code="400",handler="/api/v1/query_range"} 263 +prometheus_http_requests_total{code="400",handler="/api/v1/rules"} 4 +prometheus_http_requests_total{code="400",handler="/api/v1/series"} 11 +prometheus_http_requests_total{code="400",handler="/api/v1/targets/metadata"} 2 +prometheus_http_requests_total{code="404",handler="/assets/*filepath"} 12 +prometheus_http_requests_total{code="404",handler="/classic/static/*filepath"} 103 +prometheus_http_requests_total{code="404",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="404",handler="/favicon.ico"} 177 +prometheus_http_requests_total{code="416",handler="/favicon.svg"} 1 +prometheus_http_requests_total{code="422",handler="/api/v1/query"} 114 +prometheus_http_requests_total{code="422",handler="/api/v1/query_range"} 31 +prometheus_http_requests_total{code="499",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="499",handler="/api/v1/query_range"} 20 +prometheus_http_requests_total{code="503",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="503",handler="/api/v1/query_range"} 4 +prometheus_http_requests_total{code="503",handler="/api/v1/status/config"} 1 +# HELP prometheus_http_response_size_bytes Histogram of response size for HTTP requests. 
+# TYPE prometheus_http_response_size_bytes histogram +prometheus_http_response_size_bytes_bucket{handler="/",le="100"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="10000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="100000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+06"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+07"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+08"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+09"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="+Inf"} 688 +prometheus_http_response_size_bytes_sum{handler="/"} 19952 +prometheus_http_response_size_bytes_count{handler="/"} 688 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="10000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+06"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+07"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+08"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+09"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="+Inf"} 29524 +prometheus_http_response_size_bytes_sum{handler="/-/healthy"} 885720 +prometheus_http_response_size_bytes_count{handler="/-/healthy"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/-/ready"} 1372 +prometheus_http_response_size_bytes_count{handler="/-/ready"} 49 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="10000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+06"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+07"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+08"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+09"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="+Inf"} 48 +prometheus_http_response_size_bytes_sum{handler="/alerts"} 84096 +prometheus_http_response_size_bytes_count{handler="/alerts"} 48 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1000"} 27 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="10000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+06"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+07"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+08"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+09"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="+Inf"} 27 +prometheus_http_response_size_bytes_sum{handler="/api/v1/*path"} 0 +prometheus_http_response_size_bytes_count{handler="/api/v1/*path"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="10000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+06"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+07"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+08"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+09"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alertmanagers"} 1064 +prometheus_http_response_size_bytes_count{handler="/api/v1/alertmanagers"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="10000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+06"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+07"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+08"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+09"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="+Inf"} 14635 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alerts"} 1.0260926e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/alerts"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="10000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/api/v1/format_query"} 495 
+prometheus_http_response_size_bytes_count{handler="/api/v1/format_query"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 8167 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 15939 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 17901 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 2.0009454e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100"} 102 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1000"} 4908 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="10000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+06"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+07"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+08"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+09"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_response_size_bytes_sum{handler="/api/v1/labels"} 3.16046e+06 +prometheus_http_response_size_bytes_count{handler="/api/v1/labels"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100"} 170 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="10000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100000"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+06"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+07"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+08"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+09"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_response_size_bytes_sum{handler="/api/v1/metadata"} 1.5950362e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/metadata"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="10000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+06"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+07"} 
12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+08"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+09"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications"} 360 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100"} 4529 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="10000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+06"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+07"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+08"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+09"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="+Inf"} 4534 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications/live"} 1365 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications/live"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="10000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+06"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+07"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+08"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+09"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_response_size_bytes_sum{handler="/api/v1/parse_query"} 67620 +prometheus_http_response_size_bytes_count{handler="/api/v1/parse_query"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 25147 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 325005 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 325143 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 326635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 326692 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 326694 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 2.34481756e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100"} 841 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="10000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+06"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+07"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+08"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+09"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_exemplars"} 50460 +prometheus_http_response_size_bytes_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 69552 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 917165 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 1.152103e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 1.183391e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 1.183621e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 1.184052e+06 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 1.869092376e+09 +prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="10000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+06"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+07"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+08"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+09"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_response_size_bytes_sum{handler="/api/v1/read"} 50800 +prometheus_http_response_size_bytes_count{handler="/api/v1/read"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="10000"} 88656 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100000"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+06"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+07"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+08"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+09"} 88662 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_response_size_bytes_sum{handler="/api/v1/rules"} 7.01925022e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/rules"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="10000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+06"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+07"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+08"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+09"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 142 +prometheus_http_response_size_bytes_sum{handler="/api/v1/scrape_pools"} 18318 +prometheus_http_response_size_bytes_count{handler="/api/v1/scrape_pools"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100"} 1011 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1000"} 1233 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="10000"} 1244 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100000"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+06"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+07"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+08"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+09"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_response_size_bytes_sum{handler="/api/v1/series"} 235829 +prometheus_http_response_size_bytes_count{handler="/api/v1/series"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="10000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+06"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+07"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+08"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+09"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/buildinfo"} 807579 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="10000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100000"} 84723 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+06"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+07"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+08"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+09"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/config"} 6.3710963e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/config"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="10000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+06"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+07"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+08"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+09"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/flags"} 31968 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/flags"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="10000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+06"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+07"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+08"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+09"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/runtimeinfo"} 240400 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1000"} 92 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="10000"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100000"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+06"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+07"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+08"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+09"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/tsdb"} 66441 
+prometheus_http_response_size_bytes_count{handler="/api/v1/status/tsdb"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/walreplay"} 3381 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1000"} 184 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="10000"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100000"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+06"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+07"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+08"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+09"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets"} 191150 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="10000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+06"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+07"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+08"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+09"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets/metadata"} 2736 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="10000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+06"} 958 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+07"} 2225 
+prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+08"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+09"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="+Inf"} 2225 +prometheus_http_response_size_bytes_sum{handler="/assets/*filepath"} 3.525958804e+09 +prometheus_http_response_size_bytes_count{handler="/assets/*filepath"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="10000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+06"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+07"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+08"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+09"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 +prometheus_http_response_size_bytes_sum{handler="/classic/static/*filepath"} 1957 +prometheus_http_response_size_bytes_count{handler="/classic/static/*filepath"} 103 +prometheus_http_response_size_bytes_bucket{handler="/config",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="10000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="100000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+06"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+07"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+08"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+09"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="+Inf"} 13 +prometheus_http_response_size_bytes_sum{handler="/config"} 22776 +prometheus_http_response_size_bytes_count{handler="/config"} 13 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100"} 17 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1000"} 17 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="10000"} 31 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100000"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+06"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+07"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+08"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+09"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="+Inf"} 34 +prometheus_http_response_size_bytes_sum{handler="/consoles/*filepath"} 175556 +prometheus_http_response_size_bytes_count{handler="/consoles/*filepath"} 34 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="10000"} 4 
+prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/debug/*subpath"} 7062 +prometheus_http_response_size_bytes_count{handler="/debug/*subpath"} 4 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="10000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+06"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+07"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+08"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+09"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_response_size_bytes_sum{handler="/favicon.ico"} 3363 +prometheus_http_response_size_bytes_count{handler="/favicon.ico"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="10000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+06"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+07"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+08"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+09"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_response_size_bytes_sum{handler="/favicon.svg"} 2.135541e+06 +prometheus_http_response_size_bytes_count{handler="/favicon.svg"} 770 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="10000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/federate"} 28439 +prometheus_http_response_size_bytes_count{handler="/federate"} 4 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="10000"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100000"} 9 
+prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+06"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+07"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+08"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+09"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_response_size_bytes_sum{handler="/flags"} 15768 +prometheus_http_response_size_bytes_count{handler="/flags"} 9 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="100"} 130 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1000"} 744 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="10000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="100000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+06"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+07"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+08"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+09"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_response_size_bytes_sum{handler="/graph"} 164775 +prometheus_http_response_size_bytes_count{handler="/graph"} 751 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 4.059088e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 +prometheus_http_response_size_bytes_sum{handler="/metrics"} 8.2515439707e+10 +prometheus_http_response_size_bytes_count{handler="/metrics"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="10000"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="100000"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+06"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+07"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+08"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+09"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_response_size_bytes_sum{handler="/query"} 3.108048e+06 +prometheus_http_response_size_bytes_count{handler="/query"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="10000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+06"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+07"} 8673 
+prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+08"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+09"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_response_size_bytes_sum{handler="/rules"} 1.5195096e+07 +prometheus_http_response_size_bytes_count{handler="/rules"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="10000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+06"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+07"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+08"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+09"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_response_size_bytes_sum{handler="/service-discovery"} 35040 +prometheus_http_response_size_bytes_count{handler="/service-discovery"} 20 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="10000"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100000"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+06"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+07"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+08"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+09"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_response_size_bytes_sum{handler="/status"} 80592 +prometheus_http_response_size_bytes_count{handler="/status"} 46 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="10000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+06"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+07"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+08"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+09"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_response_size_bytes_sum{handler="/targets"} 68328 +prometheus_http_response_size_bytes_count{handler="/targets"} 39 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+08"} 49 
+prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/tsdb-status"} 85848 +prometheus_http_response_size_bytes_count{handler="/tsdb-status"} 49 +# HELP prometheus_notifications_alertmanagers_discovered The number of alertmanagers discovered and active. +# TYPE prometheus_notifications_alertmanagers_discovered gauge +prometheus_notifications_alertmanagers_discovered 1 +# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager. +# TYPE prometheus_notifications_dropped_total counter +prometheus_notifications_dropped_total 0 +# HELP prometheus_notifications_errors_total Total number of sent alerts affected by errors. +# TYPE prometheus_notifications_errors_total counter +prometheus_notifications_errors_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 0 +# HELP prometheus_notifications_latency_seconds Latency quantiles for sending alert notifications. +# TYPE prometheus_notifications_latency_seconds summary +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.5"} 0.001566044 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.9"} 0.003927931 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.99"} 0.013928135 +prometheus_notifications_latency_seconds_sum{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 194.15032606200046 +prometheus_notifications_latency_seconds_count{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 75180 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 10000 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_notifications_sent_total Total number of alerts sent. +# TYPE prometheus_notifications_sent_total counter +prometheus_notifications_sent_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 141616 +# HELP prometheus_ready Whether Prometheus startup was fully completed and the server is ready for normal operation. +# TYPE prometheus_ready gauge +prometheus_ready 1 +# HELP prometheus_remote_storage_exemplars_in_total Exemplars in to remote storage, compare to exemplars out for queue managers. +# TYPE prometheus_remote_storage_exemplars_in_total counter +prometheus_remote_storage_exemplars_in_total 4.959738e+06 +# HELP prometheus_remote_storage_highest_timestamp_in_seconds Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet. +# TYPE prometheus_remote_storage_highest_timestamp_in_seconds gauge +prometheus_remote_storage_highest_timestamp_in_seconds 1.738949375e+09 +# HELP prometheus_remote_storage_histograms_in_total HistogramSamples in to remote storage, compare to histograms out for queue managers. 
+# TYPE prometheus_remote_storage_histograms_in_total counter +prometheus_remote_storage_histograms_in_total 0 +# HELP prometheus_remote_storage_samples_in_total Samples in to remote storage, compare to samples out for queue managers. +# TYPE prometheus_remote_storage_samples_in_total counter +prometheus_remote_storage_samples_in_total 6.00447296e+08 +# HELP prometheus_remote_storage_string_interner_zero_reference_releases_total The number of times release has been called for strings that are not interned. +# TYPE prometheus_remote_storage_string_interner_zero_reference_releases_total counter +prometheus_remote_storage_string_interner_zero_reference_releases_total 0 +# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. +# TYPE prometheus_rule_evaluation_duration_seconds summary +prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 0.000214623 +prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.001456135 +prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 0.008111814 +prometheus_rule_evaluation_duration_seconds_sum 5209.704794862625 +prometheus_rule_evaluation_duration_seconds_count 7.203456e+06 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +# HELP prometheus_rule_evaluations_total The total number of rule evaluations. +# TYPE prometheus_rule_evaluations_total counter +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 118092 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 118090 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1.4761e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.476125e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 649495 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 649484 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.358035e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.358035e+06 +# HELP prometheus_rule_group_duration_seconds The duration of rule group evaluations. 
+# TYPE prometheus_rule_group_duration_seconds summary +prometheus_rule_group_duration_seconds{quantile="0.01"} 0.000735928 +prometheus_rule_group_duration_seconds{quantile="0.05"} 0.000818857 +prometheus_rule_group_duration_seconds{quantile="0.5"} 0.004852081 +prometheus_rule_group_duration_seconds{quantile="0.9"} 0.022897759 +prometheus_rule_group_duration_seconds{quantile="0.99"} 0.069327797 +prometheus_rule_group_duration_seconds_sum 5335.451440133042 +prometheus_rule_group_duration_seconds_count 472359 +# HELP prometheus_rule_group_interval_seconds The interval of a rule group. +# TYPE prometheus_rule_group_interval_seconds gauge +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 15 +# HELP prometheus_rule_group_iterations_missed_total The total number of rule group evaluations missed due to slow rule group evaluation. +# TYPE prometheus_rule_group_iterations_missed_total counter +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +# HELP prometheus_rule_group_iterations_total The total number of scheduled rule group evaluations, whether executed or missed. 
+# TYPE prometheus_rule_group_iterations_total counter +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 59046 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 59045 +# HELP prometheus_rule_group_last_duration_seconds The duration of the last rule group evaluation. +# TYPE prometheus_rule_group_last_duration_seconds gauge +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000754015 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001104624 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.022040842 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.048928087 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.002766703 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002218466 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.010245447 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009232393 +# HELP prometheus_rule_group_last_evaluation_samples The number of samples returned during the last rule group evaluation. +# TYPE prometheus_rule_group_last_evaluation_samples gauge +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +# HELP prometheus_rule_group_last_evaluation_timestamp_seconds The timestamp of the last rule group evaluation in seconds. 
+# TYPE prometheus_rule_group_last_evaluation_timestamp_seconds gauge +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 1.738949373753179e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 1.738949366900781e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1.7389493632087085e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.7389493666743164e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 1.7389493675395968e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1.7389493623381927e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.738949366856423e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.738949369917499e+09 +# HELP prometheus_rule_group_last_restore_duration_seconds The duration of the last alert rules alerts restoration using the `ALERTS_FOR_STATE` series. +# TYPE prometheus_rule_group_last_restore_duration_seconds gauge +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.002032625 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 9.1853e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.000166088 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.000108127 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 6.408e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 2.621e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 8.4979e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.000104444 +# HELP prometheus_rule_group_last_rule_duration_sum_seconds The sum of time in seconds it took to evaluate each rule in the group regardless of concurrency. This should be higher than the group duration if rules are evaluated concurrently. 
+# TYPE prometheus_rule_group_last_rule_duration_sum_seconds gauge +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000685176 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001035099 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.021769662 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.042017156 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.00260535 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002069356 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.009905481 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009056893 +# HELP prometheus_rule_group_rules The number of rules. +# TYPE prometheus_rule_group_rules gauge +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 25 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 25 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 23 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 23 +# HELP prometheus_sd_azure_cache_hit_total Number of cache hit during refresh. +# TYPE prometheus_sd_azure_cache_hit_total counter +prometheus_sd_azure_cache_hit_total 0 +# HELP prometheus_sd_azure_failures_total Number of Azure service discovery refresh failures. +# TYPE prometheus_sd_azure_failures_total counter +prometheus_sd_azure_failures_total 0 +# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. 
+# TYPE prometheus_sd_consul_rpc_duration_seconds summary +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. +# TYPE prometheus_sd_consul_rpc_failures_total counter +prometheus_sd_consul_rpc_failures_total 0 +# HELP prometheus_sd_discovered_targets Current number of discovered targets. +# TYPE prometheus_sd_discovered_targets gauge +prometheus_sd_discovered_targets{config="alertmanager",name="scrape"} 1 +prometheus_sd_discovered_targets{config="blackbox",name="scrape"} 1 +prometheus_sd_discovered_targets{config="caddy",name="scrape"} 1 +prometheus_sd_discovered_targets{config="cadvisor",name="scrape"} 1 +prometheus_sd_discovered_targets{config="config-0",name="notify"} 1 +prometheus_sd_discovered_targets{config="grafana",name="scrape"} 1 +prometheus_sd_discovered_targets{config="node",name="scrape"} 1 +prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1 +prometheus_sd_discovered_targets{config="random",name="scrape"} 4 +# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_sd_dns_lookup_failures_total counter +prometheus_sd_dns_lookup_failures_total 0 +# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_sd_dns_lookups_total counter +prometheus_sd_dns_lookups_total 0 +# HELP prometheus_sd_failed_configs Current number of service discovery configurations that failed to load. +# TYPE prometheus_sd_failed_configs gauge +prometheus_sd_failed_configs{name="notify"} 0 +prometheus_sd_failed_configs{name="scrape"} 0 +# HELP prometheus_sd_file_mtime_seconds Timestamp (mtime) of files read by FileSD. Timestamp is set at read time. +# TYPE prometheus_sd_file_mtime_seconds gauge +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/alertmanager.yml"} 1.701295557e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/cadvisor.yml"} 1.705433682e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/node.yml"} 1.701295553e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/random.yml"} 1.579263729e+09 +# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. +# TYPE prometheus_sd_file_read_errors_total counter +prometheus_sd_file_read_errors_total 0 +# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. 
+# TYPE prometheus_sd_file_scan_duration_seconds summary +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} 9.5384e-05 +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} 0.000275679 +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} 0.000275679 +prometheus_sd_file_scan_duration_seconds_sum 4.751347856999997 +prometheus_sd_file_scan_duration_seconds_count 11812 +# HELP prometheus_sd_file_watcher_errors_total The number of File-SD errors caused by filesystem watch failures. +# TYPE prometheus_sd_file_watcher_errors_total counter +prometheus_sd_file_watcher_errors_total 0 +# HELP prometheus_sd_http_failures_total Number of HTTP service discovery refresh failures. +# TYPE prometheus_sd_http_failures_total counter +prometheus_sd_http_failures_total 0 +# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. +# TYPE prometheus_sd_kubernetes_events_total counter +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +# HELP prometheus_sd_kubernetes_failures_total The number of failed WATCH/LIST requests. +# TYPE prometheus_sd_kubernetes_failures_total counter +prometheus_sd_kubernetes_failures_total 0 +# HELP prometheus_sd_kuma_fetch_duration_seconds The duration of a Kuma MADS fetch call. +# TYPE prometheus_sd_kuma_fetch_duration_seconds summary +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.5"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.9"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.99"} NaN +prometheus_sd_kuma_fetch_duration_seconds_sum 0 +prometheus_sd_kuma_fetch_duration_seconds_count 0 +# HELP prometheus_sd_kuma_fetch_failures_total The number of Kuma MADS fetch call failures. +# TYPE prometheus_sd_kuma_fetch_failures_total counter +prometheus_sd_kuma_fetch_failures_total 0 +# HELP prometheus_sd_kuma_fetch_skipped_updates_total The number of Kuma MADS fetch calls that result in no updates to the targets. +# TYPE prometheus_sd_kuma_fetch_skipped_updates_total counter +prometheus_sd_kuma_fetch_skipped_updates_total 0 +# HELP prometheus_sd_linode_failures_total Number of Linode service discovery refresh failures. +# TYPE prometheus_sd_linode_failures_total counter +prometheus_sd_linode_failures_total 0 +# HELP prometheus_sd_nomad_failures_total Number of nomad service discovery refresh failures. 
+# TYPE prometheus_sd_nomad_failures_total counter +prometheus_sd_nomad_failures_total 0 +# HELP prometheus_sd_received_updates_total Total number of update events received from the SD providers. +# TYPE prometheus_sd_received_updates_total counter +prometheus_sd_received_updates_total{name="notify"} 2 +prometheus_sd_received_updates_total{name="scrape"} 11820 +# HELP prometheus_sd_updates_delayed_total Total number of update events that couldn't be sent immediately. +# TYPE prometheus_sd_updates_delayed_total counter +prometheus_sd_updates_delayed_total{name="notify"} 0 +prometheus_sd_updates_delayed_total{name="scrape"} 0 +# HELP prometheus_sd_updates_total Total number of update events sent to the SD consumers. +# TYPE prometheus_sd_updates_total counter +prometheus_sd_updates_total{name="notify"} 1 +prometheus_sd_updates_total{name="scrape"} 2953 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. +# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14.982591232 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14.997567414 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 14.999977915 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15.000793403 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15.017607167 +prometheus_target_interval_length_seconds_sum{interval="15s"} 9.742237453453667e+06 +prometheus_target_interval_length_seconds_count{interval="15s"} 649482 +# HELP prometheus_target_metadata_cache_bytes The number of bytes that are currently used for storing metric metadata in the cache +# TYPE prometheus_target_metadata_cache_bytes gauge +prometheus_target_metadata_cache_bytes{scrape_job="alertmanager"} 6817 +prometheus_target_metadata_cache_bytes{scrape_job="blackbox"} 661 +prometheus_target_metadata_cache_bytes{scrape_job="caddy"} 2365 +prometheus_target_metadata_cache_bytes{scrape_job="cadvisor"} 4547 +prometheus_target_metadata_cache_bytes{scrape_job="grafana"} 37608 +prometheus_target_metadata_cache_bytes{scrape_job="node"} 13900 +prometheus_target_metadata_cache_bytes{scrape_job="prometheus"} 20265 +prometheus_target_metadata_cache_bytes{scrape_job="random"} 792 +# HELP prometheus_target_metadata_cache_entries Total number of metric metadata entries in the cache +# TYPE prometheus_target_metadata_cache_entries gauge +prometheus_target_metadata_cache_entries{scrape_job="alertmanager"} 86 +prometheus_target_metadata_cache_entries{scrape_job="blackbox"} 13 +prometheus_target_metadata_cache_entries{scrape_job="caddy"} 47 +prometheus_target_metadata_cache_entries{scrape_job="cadvisor"} 93 +prometheus_target_metadata_cache_entries{scrape_job="grafana"} 373 +prometheus_target_metadata_cache_entries{scrape_job="node"} 293 +prometheus_target_metadata_cache_entries{scrape_job="prometheus"} 237 +prometheus_target_metadata_cache_entries{scrape_job="random"} 16 +# HELP prometheus_target_scrape_pool_exceeded_label_limits_total Total number of times scrape pools hit the label limits, during sync or config reload. +# TYPE prometheus_target_scrape_pool_exceeded_label_limits_total counter +prometheus_target_scrape_pool_exceeded_label_limits_total 0 +# HELP prometheus_target_scrape_pool_exceeded_target_limit_total Total number of times scrape pools hit the target limit, during sync or config reload. 
+# TYPE prometheus_target_scrape_pool_exceeded_target_limit_total counter +prometheus_target_scrape_pool_exceeded_target_limit_total 0 +# HELP prometheus_target_scrape_pool_reloads_failed_total Total number of failed scrape pool reloads. +# TYPE prometheus_target_scrape_pool_reloads_failed_total counter +prometheus_target_scrape_pool_reloads_failed_total 0 +# HELP prometheus_target_scrape_pool_reloads_total Total number of scrape pool reloads. +# TYPE prometheus_target_scrape_pool_reloads_total counter +prometheus_target_scrape_pool_reloads_total 0 +# HELP prometheus_target_scrape_pool_symboltable_items Current number of symbols in table for this scrape pool. +# TYPE prometheus_target_scrape_pool_symboltable_items gauge +prometheus_target_scrape_pool_symboltable_items{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="node"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="random"} 0 +# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. +# TYPE prometheus_target_scrape_pool_sync_total counter +prometheus_target_scrape_pool_sync_total{scrape_job="alertmanager"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="blackbox"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="caddy"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="cadvisor"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="grafana"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="node"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="random"} 2953 +# HELP prometheus_target_scrape_pool_target_limit Maximum number of targets allowed in this scrape pool. +# TYPE prometheus_target_scrape_pool_target_limit gauge +prometheus_target_scrape_pool_target_limit{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="node"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="random"} 0 +# HELP prometheus_target_scrape_pool_targets Current number of targets in this scrape pool. +# TYPE prometheus_target_scrape_pool_targets gauge +prometheus_target_scrape_pool_targets{scrape_job="alertmanager"} 1 +prometheus_target_scrape_pool_targets{scrape_job="blackbox"} 1 +prometheus_target_scrape_pool_targets{scrape_job="caddy"} 1 +prometheus_target_scrape_pool_targets{scrape_job="cadvisor"} 1 +prometheus_target_scrape_pool_targets{scrape_job="grafana"} 1 +prometheus_target_scrape_pool_targets{scrape_job="node"} 1 +prometheus_target_scrape_pool_targets{scrape_job="prometheus"} 1 +prometheus_target_scrape_pool_targets{scrape_job="random"} 4 +# HELP prometheus_target_scrape_pools_failed_total Total number of scrape pool creations that failed. 
+# TYPE prometheus_target_scrape_pools_failed_total counter +prometheus_target_scrape_pools_failed_total 0 +# HELP prometheus_target_scrape_pools_total Total number of scrape pool creation attempts. +# TYPE prometheus_target_scrape_pools_total counter +prometheus_target_scrape_pools_total 8 +# HELP prometheus_target_scrapes_cache_flush_forced_total How many times a scrape cache was flushed due to getting big while scrapes are failing. +# TYPE prometheus_target_scrapes_cache_flush_forced_total counter +prometheus_target_scrapes_cache_flush_forced_total 0 +# HELP prometheus_target_scrapes_exceeded_body_size_limit_total Total number of scrapes that hit the body size limit +# TYPE prometheus_target_scrapes_exceeded_body_size_limit_total counter +prometheus_target_scrapes_exceeded_body_size_limit_total 0 +# HELP prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total Total number of scrapes that hit the native histogram bucket limit and were rejected. +# TYPE prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total counter +prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total 0 +# HELP prometheus_target_scrapes_exceeded_sample_limit_total Total number of scrapes that hit the sample limit and were rejected. +# TYPE prometheus_target_scrapes_exceeded_sample_limit_total counter +prometheus_target_scrapes_exceeded_sample_limit_total 0 +# HELP prometheus_target_scrapes_exemplar_out_of_order_total Total number of exemplar rejected due to not being out of the expected order. +# TYPE prometheus_target_scrapes_exemplar_out_of_order_total counter +prometheus_target_scrapes_exemplar_out_of_order_total 0 +# HELP prometheus_target_scrapes_sample_duplicate_timestamp_total Total number of samples rejected due to duplicate timestamps but different values. +# TYPE prometheus_target_scrapes_sample_duplicate_timestamp_total counter +prometheus_target_scrapes_sample_duplicate_timestamp_total 0 +# HELP prometheus_target_scrapes_sample_out_of_bounds_total Total number of samples rejected due to timestamp falling outside of the time bounds. +# TYPE prometheus_target_scrapes_sample_out_of_bounds_total counter +prometheus_target_scrapes_sample_out_of_bounds_total 0 +# HELP prometheus_target_scrapes_sample_out_of_order_total Total number of samples rejected due to not being out of the expected order. +# TYPE prometheus_target_scrapes_sample_out_of_order_total counter +prometheus_target_scrapes_sample_out_of_order_total 455 +# HELP prometheus_target_sync_failed_total Total number of target sync failures. +# TYPE prometheus_target_sync_failed_total counter +prometheus_target_sync_failed_total{scrape_job="alertmanager"} 0 +prometheus_target_sync_failed_total{scrape_job="blackbox"} 0 +prometheus_target_sync_failed_total{scrape_job="caddy"} 0 +prometheus_target_sync_failed_total{scrape_job="cadvisor"} 0 +prometheus_target_sync_failed_total{scrape_job="grafana"} 0 +prometheus_target_sync_failed_total{scrape_job="node"} 0 +prometheus_target_sync_failed_total{scrape_job="prometheus"} 0 +prometheus_target_sync_failed_total{scrape_job="random"} 0 +# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. 
+# TYPE prometheus_target_sync_length_seconds summary +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.01"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.05"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.5"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.9"} 0.000141485 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.99"} 0.000141485 +prometheus_target_sync_length_seconds_sum{scrape_job="alertmanager"} 0.13103036 +prometheus_target_sync_length_seconds_count{scrape_job="alertmanager"} 2953 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.01"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.05"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.5"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.9"} 6.2134e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.99"} 6.2134e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="blackbox"} 0.6044201539999996 +prometheus_target_sync_length_seconds_count{scrape_job="blackbox"} 2953 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.01"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.05"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.5"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.9"} 7.8256e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.99"} 7.8256e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="caddy"} 0.10369844599999971 +prometheus_target_sync_length_seconds_count{scrape_job="caddy"} 2953 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.01"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.05"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.5"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.9"} 4.2337e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.99"} 4.2337e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="cadvisor"} 0.10489659999999998 +prometheus_target_sync_length_seconds_count{scrape_job="cadvisor"} 2953 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.01"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.05"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.5"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.9"} 1.7284e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.99"} 1.7284e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="grafana"} 0.09031192700000017 +prometheus_target_sync_length_seconds_count{scrape_job="grafana"} 2953 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.01"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.05"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.5"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.9"} 7.416e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.99"} 7.416e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="node"} 
0.11539821299999993 +prometheus_target_sync_length_seconds_count{scrape_job="node"} 2953 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 1.961e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 1.961e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.10655758600000016 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 2953 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.01"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.05"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.5"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.9"} 4.8586e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.99"} 4.8586e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="random"} 0.20406449899999993 +prometheus_target_sync_length_seconds_count{scrape_job="random"} 2953 +# HELP prometheus_template_text_expansion_failures_total The total number of template text expansion failures. +# TYPE prometheus_template_text_expansion_failures_total counter +prometheus_template_text_expansion_failures_total 0 +# HELP prometheus_template_text_expansions_total The total number of template text expansions. +# TYPE prometheus_template_text_expansions_total counter +prometheus_template_text_expansions_total 2.126277e+06 +# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. +# TYPE prometheus_treecache_watcher_goroutines gauge +prometheus_treecache_watcher_goroutines 0 +# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. +# TYPE prometheus_treecache_zookeeper_failures_total counter +prometheus_treecache_zookeeper_failures_total 0 +# HELP prometheus_tsdb_blocks_loaded Number of currently loaded data blocks +# TYPE prometheus_tsdb_blocks_loaded gauge +prometheus_tsdb_blocks_loaded 17 +# HELP prometheus_tsdb_checkpoint_creations_failed_total Total number of checkpoint creations that failed. +# TYPE prometheus_tsdb_checkpoint_creations_failed_total counter +prometheus_tsdb_checkpoint_creations_failed_total 0 +# HELP prometheus_tsdb_checkpoint_creations_total Total number of checkpoint creations attempted. +# TYPE prometheus_tsdb_checkpoint_creations_total counter +prometheus_tsdb_checkpoint_creations_total 62 +# HELP prometheus_tsdb_checkpoint_deletions_failed_total Total number of checkpoint deletions that failed. +# TYPE prometheus_tsdb_checkpoint_deletions_failed_total counter +prometheus_tsdb_checkpoint_deletions_failed_total 0 +# HELP prometheus_tsdb_checkpoint_deletions_total Total number of checkpoint deletions attempted. +# TYPE prometheus_tsdb_checkpoint_deletions_total counter +prometheus_tsdb_checkpoint_deletions_total 62 +# HELP prometheus_tsdb_clean_start -1: lockfile is disabled. 0: a lockfile from a previous execution was replaced. 
1: lockfile creation was clean +# TYPE prometheus_tsdb_clean_start gauge +prometheus_tsdb_clean_start 1 +# HELP prometheus_tsdb_compaction_chunk_range_seconds Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range_seconds histogram +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 676 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 1555 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 2379 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 34068 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 4.819758e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_sum 8.917524773366e+12 +prometheus_tsdb_compaction_chunk_range_seconds_count 4.861956e+06 +# HELP prometheus_tsdb_compaction_chunk_samples Final number of samples on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_samples histogram +prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 1407 +prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 1544 +prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 1881 +prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 2065 +prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 2782 +prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 4342 +prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 6180 +prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 11518 +prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 13890 +prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 4.810155e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 4.86058e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_sum 5.86000708e+08 +prometheus_tsdb_compaction_chunk_samples_count 4.861956e+06 +# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 1233 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 156238 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2.006456e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3.568405e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 3.835144e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 4.034591e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 4.505646e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 4.69694e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 4.78551e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 4.861956e+06 
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_sum 6.81852972e+08 +prometheus_tsdb_compaction_chunk_size_bytes_count 4.861956e+06 +# HELP prometheus_tsdb_compaction_duration_seconds Duration of compaction runs +# TYPE prometheus_tsdb_compaction_duration_seconds histogram +prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 61 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 155 +prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 180 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="1024"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2048"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="4096"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8192"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 183 +prometheus_tsdb_compaction_duration_seconds_sum 254.9095696899999 +prometheus_tsdb_compaction_duration_seconds_count 183 +# HELP prometheus_tsdb_compaction_populating_block Set to 1 when a block is currently being written to the disk. +# TYPE prometheus_tsdb_compaction_populating_block gauge +prometheus_tsdb_compaction_populating_block 0 +# HELP prometheus_tsdb_compactions_failed_total Total number of compactions that failed for the partition. +# TYPE prometheus_tsdb_compactions_failed_total counter +prometheus_tsdb_compactions_failed_total 0 +# HELP prometheus_tsdb_compactions_skipped_total Total number of skipped compactions due to disabled auto compaction. +# TYPE prometheus_tsdb_compactions_skipped_total counter +prometheus_tsdb_compactions_skipped_total 0 +# HELP prometheus_tsdb_compactions_total Total number of compactions that were executed for the partition. +# TYPE prometheus_tsdb_compactions_total counter +prometheus_tsdb_compactions_total 183 +# HELP prometheus_tsdb_compactions_triggered_total Total number of triggered compactions for the partition. +# TYPE prometheus_tsdb_compactions_triggered_total counter +prometheus_tsdb_compactions_triggered_total 14855 +# HELP prometheus_tsdb_data_replay_duration_seconds Time taken to replay the data on disk. +# TYPE prometheus_tsdb_data_replay_duration_seconds gauge +prometheus_tsdb_data_replay_duration_seconds 5.550163617 +# HELP prometheus_tsdb_exemplar_exemplars_appended_total Total number of appended exemplars. +# TYPE prometheus_tsdb_exemplar_exemplars_appended_total counter +prometheus_tsdb_exemplar_exemplars_appended_total 0 +# HELP prometheus_tsdb_exemplar_exemplars_in_storage Number of exemplars currently in circular storage. +# TYPE prometheus_tsdb_exemplar_exemplars_in_storage gauge +prometheus_tsdb_exemplar_exemplars_in_storage 0 +# HELP prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds The timestamp of the oldest exemplar stored in circular storage. Useful to check for what timerange the current exemplar buffer limit allows. This usually means the last timestampfor all exemplars for a typical setup. This is not true though if one of the series timestamp is in future compared to rest series. 
+# TYPE prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds gauge +prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds 0 +# HELP prometheus_tsdb_exemplar_max_exemplars Total number of exemplars the exemplar storage can store, resizeable. +# TYPE prometheus_tsdb_exemplar_max_exemplars gauge +prometheus_tsdb_exemplar_max_exemplars 0 +# HELP prometheus_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts. +# TYPE prometheus_tsdb_exemplar_out_of_order_exemplars_total counter +prometheus_tsdb_exemplar_out_of_order_exemplars_total 0 +# HELP prometheus_tsdb_exemplar_series_with_exemplars_in_storage Number of series with exemplars currently in circular storage. +# TYPE prometheus_tsdb_exemplar_series_with_exemplars_in_storage gauge +prometheus_tsdb_exemplar_series_with_exemplars_in_storage 0 +# HELP prometheus_tsdb_head_active_appenders Number of currently active appender transactions +# TYPE prometheus_tsdb_head_active_appenders gauge +prometheus_tsdb_head_active_appenders 0 +# HELP prometheus_tsdb_head_chunks Total number of chunks in the head block. +# TYPE prometheus_tsdb_head_chunks gauge +prometheus_tsdb_head_chunks 31476 +# HELP prometheus_tsdb_head_chunks_created_total Total number of chunks created in the head +# TYPE prometheus_tsdb_head_chunks_created_total counter +prometheus_tsdb_head_chunks_created_total 4.893432e+06 +# HELP prometheus_tsdb_head_chunks_removed_total Total number of chunks removed in the head +# TYPE prometheus_tsdb_head_chunks_removed_total counter +prometheus_tsdb_head_chunks_removed_total 4.861956e+06 +# HELP prometheus_tsdb_head_chunks_storage_size_bytes Size of the chunks_head directory. +# TYPE prometheus_tsdb_head_chunks_storage_size_bytes gauge +prometheus_tsdb_head_chunks_storage_size_bytes 7.237299e+06 +# HELP prometheus_tsdb_head_gc_duration_seconds Runtime of garbage collection in the head block. +# TYPE prometheus_tsdb_head_gc_duration_seconds summary +prometheus_tsdb_head_gc_duration_seconds_sum 4.773801686000001 +prometheus_tsdb_head_gc_duration_seconds_count 123 +# HELP prometheus_tsdb_head_max_time Maximum timestamp of the head block. The unit is decided by the library consumer. +# TYPE prometheus_tsdb_head_max_time gauge +prometheus_tsdb_head_max_time 1.738949375191e+12 +# HELP prometheus_tsdb_head_max_time_seconds Maximum timestamp of the head block. +# TYPE prometheus_tsdb_head_max_time_seconds gauge +prometheus_tsdb_head_max_time_seconds 1.738949375e+09 +# HELP prometheus_tsdb_head_min_time Minimum time bound of the head block. The unit is decided by the library consumer. +# TYPE prometheus_tsdb_head_min_time gauge +prometheus_tsdb_head_min_time 1.738944000171e+12 +# HELP prometheus_tsdb_head_min_time_seconds Minimum time bound of the head block. +# TYPE prometheus_tsdb_head_min_time_seconds gauge +prometheus_tsdb_head_min_time_seconds 1.738944e+09 +# HELP prometheus_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples. +# TYPE prometheus_tsdb_head_out_of_order_samples_appended_total counter +prometheus_tsdb_head_out_of_order_samples_appended_total{type="float"} 0 +prometheus_tsdb_head_out_of_order_samples_appended_total{type="histogram"} 0 +# HELP prometheus_tsdb_head_samples_appended_total Total number of appended samples. 
+# TYPE prometheus_tsdb_head_samples_appended_total counter +prometheus_tsdb_head_samples_appended_total{type="float"} 5.85543187e+08 +prometheus_tsdb_head_samples_appended_total{type="histogram"} 0 +# HELP prometheus_tsdb_head_series Total number of series in the head block. +# TYPE prometheus_tsdb_head_series gauge +prometheus_tsdb_head_series 10720 +# HELP prometheus_tsdb_head_series_created_total Total number of series created in the head +# TYPE prometheus_tsdb_head_series_created_total counter +prometheus_tsdb_head_series_created_total 18541 +# HELP prometheus_tsdb_head_series_not_found_total Total number of requests for series that were not found. +# TYPE prometheus_tsdb_head_series_not_found_total counter +prometheus_tsdb_head_series_not_found_total 0 +# HELP prometheus_tsdb_head_series_removed_total Total number of series removed in the head +# TYPE prometheus_tsdb_head_series_removed_total counter +prometheus_tsdb_head_series_removed_total 7821 +# HELP prometheus_tsdb_head_truncations_failed_total Total number of head truncations that failed. +# TYPE prometheus_tsdb_head_truncations_failed_total counter +prometheus_tsdb_head_truncations_failed_total 0 +# HELP prometheus_tsdb_head_truncations_total Total number of head truncations attempted. +# TYPE prometheus_tsdb_head_truncations_total counter +prometheus_tsdb_head_truncations_total 123 +# HELP prometheus_tsdb_isolation_high_watermark The highest TSDB append ID that has been given out. +# TYPE prometheus_tsdb_isolation_high_watermark gauge +prometheus_tsdb_isolation_high_watermark 7.852949e+06 +# HELP prometheus_tsdb_isolation_low_watermark The lowest TSDB append ID that is still referenced. +# TYPE prometheus_tsdb_isolation_low_watermark gauge +prometheus_tsdb_isolation_low_watermark 7.852949e+06 +# HELP prometheus_tsdb_lowest_timestamp Lowest timestamp value stored in the database. The unit is decided by the library consumer. +# TYPE prometheus_tsdb_lowest_timestamp gauge +prometheus_tsdb_lowest_timestamp 1.73618640004e+12 +# HELP prometheus_tsdb_lowest_timestamp_seconds Lowest timestamp value stored in the database. +# TYPE prometheus_tsdb_lowest_timestamp_seconds gauge +prometheus_tsdb_lowest_timestamp_seconds 1.7361864e+09 +# HELP prometheus_tsdb_mmap_chunk_corruptions_total Total number of memory-mapped chunk corruptions. +# TYPE prometheus_tsdb_mmap_chunk_corruptions_total counter +prometheus_tsdb_mmap_chunk_corruptions_total 0 +# HELP prometheus_tsdb_mmap_chunks_total Total number of chunks that were memory-mapped. +# TYPE prometheus_tsdb_mmap_chunks_total counter +prometheus_tsdb_mmap_chunks_total 4.851264e+06 +# HELP prometheus_tsdb_out_of_bound_samples_total Total number of out of bound samples ingestion failed attempts with out of order support disabled. +# TYPE prometheus_tsdb_out_of_bound_samples_total counter +prometheus_tsdb_out_of_bound_samples_total{type="float"} 0 +# HELP prometheus_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled. +# TYPE prometheus_tsdb_out_of_order_samples_total counter +prometheus_tsdb_out_of_order_samples_total{type="float"} 517 +prometheus_tsdb_out_of_order_samples_total{type="histogram"} 0 +# HELP prometheus_tsdb_reloads_failures_total Number of times the database failed to reloadBlocks block data from disk. +# TYPE prometheus_tsdb_reloads_failures_total counter +prometheus_tsdb_reloads_failures_total 0 +# HELP prometheus_tsdb_reloads_total Number of times the database reloaded block data from disk. 
+# TYPE prometheus_tsdb_reloads_total counter +prometheus_tsdb_reloads_total 14822 +# HELP prometheus_tsdb_retention_limit_bytes Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled +# TYPE prometheus_tsdb_retention_limit_bytes gauge +prometheus_tsdb_retention_limit_bytes 0 +# HELP prometheus_tsdb_retention_limit_seconds How long to retain samples in storage. +# TYPE prometheus_tsdb_retention_limit_seconds gauge +prometheus_tsdb_retention_limit_seconds 2.6784e+06 +# HELP prometheus_tsdb_size_retentions_total The number of times that blocks were deleted because the maximum number of bytes was exceeded. +# TYPE prometheus_tsdb_size_retentions_total counter +prometheus_tsdb_size_retentions_total 0 +# HELP prometheus_tsdb_snapshot_replay_error_total Total number snapshot replays that failed. +# TYPE prometheus_tsdb_snapshot_replay_error_total counter +prometheus_tsdb_snapshot_replay_error_total 0 +# HELP prometheus_tsdb_storage_blocks_bytes The number of bytes that are currently used for local storage by all blocks. +# TYPE prometheus_tsdb_storage_blocks_bytes gauge +prometheus_tsdb_storage_blocks_bytes 2.762863592e+09 +# HELP prometheus_tsdb_symbol_table_size_bytes Size of symbol table in memory for loaded blocks +# TYPE prometheus_tsdb_symbol_table_size_bytes gauge +prometheus_tsdb_symbol_table_size_bytes 10616 +# HELP prometheus_tsdb_time_retentions_total The number of times that blocks were deleted because the maximum time limit was exceeded. +# TYPE prometheus_tsdb_time_retentions_total counter +prometheus_tsdb_time_retentions_total 5 +# HELP prometheus_tsdb_tombstone_cleanup_seconds The time taken to recompact blocks to remove tombstones. +# TYPE prometheus_tsdb_tombstone_cleanup_seconds histogram +prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0 +prometheus_tsdb_tombstone_cleanup_seconds_sum 0 +prometheus_tsdb_tombstone_cleanup_seconds_count 0 +# HELP prometheus_tsdb_too_old_samples_total Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of time window. +# TYPE prometheus_tsdb_too_old_samples_total counter +prometheus_tsdb_too_old_samples_total{type="float"} 0 +# HELP prometheus_tsdb_vertical_compactions_total Total number of compactions done on overlapping blocks. +# TYPE prometheus_tsdb_vertical_compactions_total counter +prometheus_tsdb_vertical_compactions_total 0 +# HELP prometheus_tsdb_wal_completed_pages_total Total number of completed pages. +# TYPE prometheus_tsdb_wal_completed_pages_total counter +prometheus_tsdb_wal_completed_pages_total 109271 +# HELP prometheus_tsdb_wal_corruptions_total Total number of WAL corruptions. +# TYPE prometheus_tsdb_wal_corruptions_total counter +prometheus_tsdb_wal_corruptions_total 0 +# HELP prometheus_tsdb_wal_fsync_duration_seconds Duration of write log fsync. +# TYPE prometheus_tsdb_wal_fsync_duration_seconds summary +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} NaN +prometheus_tsdb_wal_fsync_duration_seconds_sum 4.842524568000002 +prometheus_tsdb_wal_fsync_duration_seconds_count 123 +# HELP prometheus_tsdb_wal_page_flushes_total Total number of page flushes. +# TYPE prometheus_tsdb_wal_page_flushes_total counter +prometheus_tsdb_wal_page_flushes_total 2.293951e+06 +# HELP prometheus_tsdb_wal_segment_current Write log segment index that TSDB is currently writing to. 
+# TYPE prometheus_tsdb_wal_segment_current gauge +prometheus_tsdb_wal_segment_current 24726 +# HELP prometheus_tsdb_wal_storage_size_bytes Size of the write log directory. +# TYPE prometheus_tsdb_wal_storage_size_bytes gauge +prometheus_tsdb_wal_storage_size_bytes 6.9168385e+07 +# HELP prometheus_tsdb_wal_truncate_duration_seconds Duration of WAL truncation. +# TYPE prometheus_tsdb_wal_truncate_duration_seconds summary +prometheus_tsdb_wal_truncate_duration_seconds_sum 121.61954577099996 +prometheus_tsdb_wal_truncate_duration_seconds_count 62 +# HELP prometheus_tsdb_wal_truncations_failed_total Total number of write log truncations that failed. +# TYPE prometheus_tsdb_wal_truncations_failed_total counter +prometheus_tsdb_wal_truncations_failed_total 0 +# HELP prometheus_tsdb_wal_truncations_total Total number of write log truncations attempted. +# TYPE prometheus_tsdb_wal_truncations_total counter +prometheus_tsdb_wal_truncations_total 62 +# HELP prometheus_tsdb_wal_writes_failed_total Total number of write log writes that failed. +# TYPE prometheus_tsdb_wal_writes_failed_total counter +prometheus_tsdb_wal_writes_failed_total 0 +# HELP prometheus_web_federation_errors_total Total number of errors that occurred while sending federation responses. +# TYPE prometheus_web_federation_errors_total counter +prometheus_web_federation_errors_total 0 +# HELP prometheus_web_federation_warnings_total Total number of warnings that occurred while sending federation responses. +# TYPE prometheus_web_federation_warnings_total counter +prometheus_web_federation_warnings_total 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 4.059092e+06 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# EOF diff --git a/model/textparse/testdata/alltypes.5mfs.om.txt b/model/textparse/testdata/alltypes.5mfs.om.txt new file mode 100644 index 0000000000..0f5f78b8b9 --- /dev/null +++ b/model/textparse/testdata/alltypes.5mfs.om.txt @@ -0,0 +1,64 @@ +# HELP go_build_info Build information about the main Go module. +# TYPE go_build_info gauge +go_build_info{checksum="",path="",version=""} 1.0 +# HELP promhttp_metric_handler_errors Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors counter +promhttp_metric_handler_errors_total{cause="encoding"} 0.0 +promhttp_metric_handler_errors_created{cause="encoding"} 1.726839813016397e+09 +promhttp_metric_handler_errors_total{cause="gathering"} 0.0 +promhttp_metric_handler_errors_created{cause="gathering"} 1.726839813016395e+09 +# HELP rpc_durations_histogram_seconds RPC latency distributions. 
+# TYPE rpc_durations_histogram_seconds histogram +rpc_durations_histogram_seconds_bucket{le="-0.00099"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.00089"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0007899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0006899999999999999"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0005899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0004899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0003899999999999998"} 0 +rpc_durations_histogram_seconds_bucket{le="-0.0002899999999999998"} 3 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09 +rpc_durations_histogram_seconds_bucket{le="-0.0001899999999999998"} 5 # {dummyID="84741"} -0.00020178290006788965 1.726839814829977e+09 +rpc_durations_histogram_seconds_bucket{le="-8.999999999999979e-05"} 5 +rpc_durations_histogram_seconds_bucket{le="1.0000000000000216e-05"} 8 # {dummyID="19206"} -4.6156147425468016e-05 1.7268398151337721e+09 +rpc_durations_histogram_seconds_bucket{le="0.00011000000000000022"} 9 # {dummyID="3974"} 9.528436760156754e-05 1.726839814526797e+09 +rpc_durations_histogram_seconds_bucket{le="0.00021000000000000023"} 11 # {dummyID="29640"} 0.00017459624183458996 1.7268398139220061e+09 +rpc_durations_histogram_seconds_bucket{le="0.0003100000000000002"} 15 # {dummyID="9818"} 0.0002791130914009552 1.7268398149821382e+09 +rpc_durations_histogram_seconds_bucket{le="0.0004100000000000002"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0005100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0006100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0007100000000000003"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0008100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="0.0009100000000000004"} 15 +rpc_durations_histogram_seconds_bucket{le="+Inf"} 15 +rpc_durations_histogram_seconds_sum -8.452185437166741e-05 +rpc_durations_histogram_seconds_count 15 +rpc_durations_histogram_seconds_created 1.726839813016302e+09 +# HELP rpc_durations_seconds RPC latency distributions. +# TYPE rpc_durations_seconds summary +rpc_durations_seconds{service="exponential",quantile="0.5"} 7.689368882420941e-07 +rpc_durations_seconds{service="exponential",quantile="0.9"} 1.6537614174305048e-06 +rpc_durations_seconds{service="exponential",quantile="0.99"} 2.0965499063061924e-06 +rpc_durations_seconds_sum{service="exponential"} 2.0318666372575776e-05 +rpc_durations_seconds_count{service="exponential"} 22 +rpc_durations_seconds_created{service="exponential"} 1.7268398130168908e+09 +rpc_durations_seconds{service="normal",quantile="0.5"} -5.066758674917046e-06 +rpc_durations_seconds{service="normal",quantile="0.9"} 0.0002935723711788224 +rpc_durations_seconds{service="normal",quantile="0.99"} 0.0003023094636293776 +rpc_durations_seconds_sum{service="normal"} -8.452185437166741e-05 +rpc_durations_seconds_count{service="normal"} 15 +rpc_durations_seconds_created{service="normal"} 1.726839813016714e+09 +rpc_durations_seconds{service="uniform",quantile="0.5"} 9.005014931474918e-05 +rpc_durations_seconds{service="uniform",quantile="0.9"} 0.00017801230208182325 +rpc_durations_seconds{service="uniform",quantile="0.99"} 0.00018641524538180192 +rpc_durations_seconds_sum{service="uniform"} 0.0011666095700533677 +rpc_durations_seconds_count{service="uniform"} 11 +rpc_durations_seconds_created{service="uniform"} 1.72683981301684e+09 +# HELP rpc_requests Total number of RPC requests received. 
+# TYPE rpc_requests counter +rpc_requests_total{service="exponential"} 22.0 +rpc_requests_created{service="exponential"} 1.726839813016893e+09 +rpc_requests_total{service="normal"} 15.0 +rpc_requests_created{service="normal"} 1.726839813016717e+09 +rpc_requests_total{service="uniform"} 11.0 +rpc_requests_created{service="uniform"} 1.7268398130168471e+09 +# EOF diff --git a/notifier/alert.go b/notifier/alert.go new file mode 100644 index 0000000000..88245c9a7f --- /dev/null +++ b/notifier/alert.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "fmt" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels labels.Labels `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations labels.Labels `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL,omitempty"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return a.Labels.Get(labels.AlertName) +} + +// Hash returns a hash over the alert. It is equivalent to the alert labels hash. +func (a *Alert) Hash() uint64 { + return a.Labels.Hash() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), fmt.Sprintf("%016x", a.Hash())[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true iff the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +func relabelAlerts(relabelConfigs []*relabel.Config, externalLabels labels.Labels, alerts []*Alert) []*Alert { + lb := labels.NewBuilder(labels.EmptyLabels()) + var relabeledAlerts []*Alert + + for _, a := range alerts { + lb.Reset(a.Labels) + externalLabels.Range(func(l labels.Label) { + if a.Labels.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + }) + + keep := relabel.ProcessBuilder(lb, relabelConfigs...) 
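+		// ProcessBuilder applies each relabel rule to the label builder in
+		// order; a false return means some rule decided this alert should be
+		// dropped rather than forwarded.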
+ if !keep { + continue + } + a.Labels = lb.Labels() + relabeledAlerts = append(relabeledAlerts, a) + } + return relabeledAlerts +} diff --git a/notifier/alertmanager.go b/notifier/alertmanager.go new file mode 100644 index 0000000000..8bcf7954ec --- /dev/null +++ b/notifier/alertmanager.go @@ -0,0 +1,90 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "fmt" + "net/url" + "path" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" +) + +// Alertmanager holds Alertmanager endpoint information. +type alertmanager interface { + url() *url.URL +} + +type alertmanagerLabels struct{ labels.Labels } + +const pathLabel = "__alerts_path__" + +func (a alertmanagerLabels) url() *url.URL { + return &url.URL{ + Scheme: a.Get(model.SchemeLabel), + Host: a.Get(model.AddressLabel), + Path: a.Get(pathLabel), + } +} + +// AlertmanagerFromGroup extracts a list of alertmanagers from a target group +// and an associated AlertmanagerConfig. +func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { + var res []alertmanager + var droppedAlertManagers []alertmanager + lb := labels.NewBuilder(labels.EmptyLabels()) + + for _, tlset := range tg.Targets { + lb.Reset(labels.EmptyLabels()) + + for ln, lv := range tlset { + lb.Set(string(ln), string(lv)) + } + // Set configured scheme as the initial scheme label for overwrite. + lb.Set(model.SchemeLabel, cfg.Scheme) + lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) + + // Combine target labels with target group labels. + for ln, lv := range tg.Labels { + if _, ok := tlset[ln]; !ok { + lb.Set(string(ln), string(lv)) + } + } + + preRelabel := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) + if !keep { + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) + continue + } + + addr := lb.Get(model.AddressLabel) + if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { + return nil, nil, err + } + + res = append(res, alertmanagerLabels{lb.Labels()}) + } + return res, droppedAlertManagers, nil +} + +func postPath(pre string, v config.AlertmanagerAPIVersion) string { + alertPushEndpoint := fmt.Sprintf("/api/%v/alerts", string(v)) + return path.Join("/", pre, alertPushEndpoint) +} diff --git a/notifier/alertmanager_test.go b/notifier/alertmanager_test.go new file mode 100644 index 0000000000..ea27f37be7 --- /dev/null +++ b/notifier/alertmanager_test.go @@ -0,0 +1,62 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/config" +) + +func TestPostPath(t *testing.T) { + cases := []struct { + in, out string + }{ + { + in: "", + out: "/api/v2/alerts", + }, + { + in: "/", + out: "/api/v2/alerts", + }, + { + in: "/prefix", + out: "/prefix/api/v2/alerts", + }, + { + in: "/prefix//", + out: "/prefix/api/v2/alerts", + }, + { + in: "prefix//", + out: "/prefix/api/v2/alerts", + }, + } + for _, c := range cases { + require.Equal(t, c.out, postPath(c.in, config.AlertmanagerAPIVersionV2)) + } +} + +func TestLabelSetNotReused(t *testing.T) { + tg := makeInputTargetGroup() + _, _, err := AlertmanagerFromGroup(tg, &config.AlertmanagerConfig{}) + + require.NoError(t, err) + + // Target modified during alertmanager extraction + require.Equal(t, tg, makeInputTargetGroup()) +} diff --git a/notifier/alertmanagerset.go b/notifier/alertmanagerset.go new file mode 100644 index 0000000000..50471098ad --- /dev/null +++ b/notifier/alertmanagerset.go @@ -0,0 +1,128 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "crypto/md5" + "encoding/hex" + "log/slog" + "net/http" + "sync" + + config_util "github.com/prometheus/common/config" + "github.com/prometheus/sigv4" + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +// alertmanagerSet contains a set of Alertmanagers discovered via a group of service +// discovery definitions that have a common configuration on how alerts should be sent. +type alertmanagerSet struct { + cfg *config.AlertmanagerConfig + client *http.Client + + metrics *alertMetrics + + mtx sync.RWMutex + ams []alertmanager + droppedAms []alertmanager + logger *slog.Logger +} + +func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") + if err != nil { + return nil, err + } + t := client.Transport + + if cfg.SigV4Config != nil { + t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport) + if err != nil { + return nil, err + } + } + + client.Transport = t + + s := &alertmanagerSet{ + client: client, + cfg: cfg, + logger: logger, + metrics: metrics, + } + return s, nil +} + +// sync extracts a deduplicated set of Alertmanager endpoints from a list +// of target groups definitions. 
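+// Endpoints that were present in the previous sync but are gone from the new
+// one have their per-URL metrics deleted, so stale Alertmanagers stop being
+// reported.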
+func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { + allAms := []alertmanager{} + allDroppedAms := []alertmanager{} + + for _, tg := range tgs { + ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) + if err != nil { + s.logger.Error("Creating discovered Alertmanagers failed", "err", err) + continue + } + allAms = append(allAms, ams...) + allDroppedAms = append(allDroppedAms, droppedAms...) + } + + s.mtx.Lock() + defer s.mtx.Unlock() + previousAms := s.ams + // Set new Alertmanagers and deduplicate them along their unique URL. + s.ams = []alertmanager{} + s.droppedAms = []alertmanager{} + s.droppedAms = append(s.droppedAms, allDroppedAms...) + seen := map[string]struct{}{} + + for _, am := range allAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + + // This will initialize the Counters for the AM to 0. + s.metrics.sent.WithLabelValues(us) + s.metrics.errors.WithLabelValues(us) + + seen[us] = struct{}{} + s.ams = append(s.ams, am) + } + // Now remove counters for any removed Alertmanagers. + for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } +} + +func (s *alertmanagerSet) configHash() (string, error) { + b, err := yaml.Marshal(s.cfg) + if err != nil { + return "", err + } + hash := md5.Sum(b) + return hex.EncodeToString(hash[:]), nil +} diff --git a/notifier/manager.go b/notifier/manager.go new file mode 100644 index 0000000000..69ce9b221b --- /dev/null +++ b/notifier/manager.go @@ -0,0 +1,545 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" + "github.com/prometheus/common/version" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" +) + +const ( + // DefaultMaxBatchSize is the default maximum number of alerts to send in a single request to the alertmanager. + DefaultMaxBatchSize = 256 + + contentTypeJSON = "application/json" +) + +// String constants for instrumentation. +const ( + namespace = "prometheus" + subsystem = "notifications" + alertmanagerLabel = "alertmanager" +) + +var userAgent = version.PrometheusUserAgent() + +// Manager is responsible for dispatching alert notifications to an +// alert manager service. 
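+// Alerts are buffered in an in-memory queue and shipped in batches of up to
+// Options.MaxBatchSize; sending and target-group updates run on separate
+// goroutines so that a slow Alertmanager cannot starve service discovery.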
+type Manager struct { + queue []*Alert + opts *Options + + metrics *alertMetrics + + more chan struct{} + mtx sync.RWMutex + + stopOnce *sync.Once + stopRequested chan struct{} + + alertmanagers map[string]*alertmanagerSet + logger *slog.Logger +} + +// Options are the configurable parameters of a Handler. +type Options struct { + QueueCapacity int + DrainOnShutdown bool + ExternalLabels labels.Labels + RelabelConfigs []*relabel.Config + // Used for sending HTTP requests to the Alertmanager. + Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) + + Registerer prometheus.Registerer + + // MaxBatchSize determines the maximum number of alerts to send in a single request to the alertmanager. + MaxBatchSize int +} + +func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + return client.Do(req.WithContext(ctx)) +} + +// NewManager is the manager constructor. +func NewManager(o *Options, logger *slog.Logger) *Manager { + if o.Do == nil { + o.Do = do + } + // Set default MaxBatchSize if not provided. + if o.MaxBatchSize <= 0 { + o.MaxBatchSize = DefaultMaxBatchSize + } + if logger == nil { + logger = promslog.NewNopLogger() + } + + n := &Manager{ + queue: make([]*Alert, 0, o.QueueCapacity), + more: make(chan struct{}, 1), + stopRequested: make(chan struct{}), + stopOnce: &sync.Once{}, + opts: o, + logger: logger, + } + + queueLenFunc := func() float64 { return float64(n.queueLen()) } + alertmanagersDiscoveredFunc := func() float64 { return float64(len(n.Alertmanagers())) } + + n.metrics = newAlertMetrics( + o.Registerer, + o.QueueCapacity, + queueLenFunc, + alertmanagersDiscoveredFunc, + ) + + return n +} + +// ApplyConfig updates the status state as the new config requires. +func (n *Manager) ApplyConfig(conf *config.Config) error { + n.mtx.Lock() + defer n.mtx.Unlock() + + n.opts.ExternalLabels = conf.GlobalConfig.ExternalLabels + n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs + + amSets := make(map[string]*alertmanagerSet) + // configToAlertmanagers maps alertmanager sets for each unique AlertmanagerConfig, + // helping to avoid dropping known alertmanagers and re-use them without waiting for SD updates when applying the config. + configToAlertmanagers := make(map[string]*alertmanagerSet, len(n.alertmanagers)) + for _, oldAmSet := range n.alertmanagers { + hash, err := oldAmSet.configHash() + if err != nil { + return err + } + configToAlertmanagers[hash] = oldAmSet + } + + for k, cfg := range conf.AlertingConfig.AlertmanagerConfigs.ToMap() { + ams, err := newAlertmanagerSet(cfg, n.logger, n.metrics) + if err != nil { + return err + } + + hash, err := ams.configHash() + if err != nil { + return err + } + + if oldAmSet, ok := configToAlertmanagers[hash]; ok { + ams.ams = oldAmSet.ams + ams.droppedAms = oldAmSet.droppedAms + } + + amSets[k] = ams + } + + n.alertmanagers = amSets + + return nil +} + +func (n *Manager) queueLen() int { + n.mtx.RLock() + defer n.mtx.RUnlock() + + return len(n.queue) +} + +func (n *Manager) nextBatch() []*Alert { + n.mtx.Lock() + defer n.mtx.Unlock() + + var alerts []*Alert + + if maxBatchSize := n.opts.MaxBatchSize; len(n.queue) > maxBatchSize { + alerts = append(make([]*Alert, 0, maxBatchSize), n.queue[:maxBatchSize]...) + n.queue = n.queue[maxBatchSize:] + } else { + alerts = append(make([]*Alert, 0, len(n.queue)), n.queue...) 
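+		// The copy above keeps the returned batch stable even though the
+		// queue's backing array is emptied and reused below.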
+ n.queue = n.queue[:0] + } + + return alerts +} + +// Run dispatches notifications continuously, returning once Stop has been called and all +// pending notifications have been drained from the queue (if draining is enabled). +// +// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other. +// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details. +func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + n.targetUpdateLoop(tsets) + }() + + go func() { + defer wg.Done() + n.sendLoop() + n.drainQueue() + }() + + wg.Wait() + n.logger.Info("Notification manager stopped") +} + +// sendLoop continuously consumes the notifications queue and sends alerts to +// the configured Alertmanagers. +func (n *Manager) sendLoop() { + for { + // If we've been asked to stop, that takes priority over sending any further notifications. + select { + case <-n.stopRequested: + return + default: + select { + case <-n.stopRequested: + return + + case <-n.more: + n.sendOneBatch() + + // If the queue still has items left, kick off the next iteration. + if n.queueLen() > 0 { + n.setMore() + } + } + } + } +} + +// targetUpdateLoop receives updates of target groups and triggers a reload. +func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) { + for { + // If we've been asked to stop, that takes priority over processing any further target group updates. + select { + case <-n.stopRequested: + return + default: + select { + case <-n.stopRequested: + return + case ts := <-tsets: + n.reload(ts) + } + } + } +} + +func (n *Manager) sendOneBatch() { + alerts := n.nextBatch() + + if !n.sendAll(alerts...) { + n.metrics.dropped.Add(float64(len(alerts))) + } +} + +func (n *Manager) drainQueue() { + if !n.opts.DrainOnShutdown { + if n.queueLen() > 0 { + n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) + n.metrics.dropped.Add(float64(n.queueLen())) + } + + return + } + + n.logger.Info("Draining any remaining notifications...") + + for n.queueLen() > 0 { + n.sendOneBatch() + } + + n.logger.Info("Remaining notifications drained") +} + +func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { + n.mtx.Lock() + defer n.mtx.Unlock() + + for id, tgroup := range tgs { + am, ok := n.alertmanagers[id] + if !ok { + n.logger.Error("couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) + continue + } + am.sync(tgroup) + } +} + +// Send queues the given notification requests for processing. +// Panics if called on a handler that is not running. +func (n *Manager) Send(alerts ...*Alert) { + n.mtx.Lock() + defer n.mtx.Unlock() + + alerts = relabelAlerts(n.opts.RelabelConfigs, n.opts.ExternalLabels, alerts) + if len(alerts) == 0 { + return + } + + // Queue capacity should be significantly larger than a single alert + // batch could be. + if d := len(alerts) - n.opts.QueueCapacity; d > 0 { + alerts = alerts[d:] + + n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) + n.metrics.dropped.Add(float64(d)) + } + + // If the queue is full, remove the oldest alerts in favor + // of newer ones. 
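+	// For example, with QueueCapacity=10, len(n.queue)=8 and len(alerts)=5,
+	// d=3 and the three oldest queued alerts are dropped to make room.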
+	if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 {
+		n.queue = n.queue[d:]
+
+		n.logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", d)
+		n.metrics.dropped.Add(float64(d))
+	}
+	n.queue = append(n.queue, alerts...)
+
+	// Notify sending goroutine that there are alerts to be processed.
+	n.setMore()
+}
+
+// setMore signals that the alert queue has items.
+func (n *Manager) setMore() {
+	// If we cannot send on the channel, it means the signal already exists
+	// and has not been consumed yet.
+	select {
+	case n.more <- struct{}{}:
+	default:
+	}
+}
+
+// Alertmanagers returns a slice of Alertmanager URLs.
+func (n *Manager) Alertmanagers() []*url.URL {
+	n.mtx.RLock()
+	amSets := n.alertmanagers
+	n.mtx.RUnlock()
+
+	var res []*url.URL
+
+	for _, ams := range amSets {
+		ams.mtx.RLock()
+		for _, am := range ams.ams {
+			res = append(res, am.url())
+		}
+		ams.mtx.RUnlock()
+	}
+
+	return res
+}
+
+// DroppedAlertmanagers returns a slice of Alertmanager URLs.
+func (n *Manager) DroppedAlertmanagers() []*url.URL {
+	n.mtx.RLock()
+	amSets := n.alertmanagers
+	n.mtx.RUnlock()
+
+	var res []*url.URL
+
+	for _, ams := range amSets {
+		ams.mtx.RLock()
+		for _, dam := range ams.droppedAms {
+			res = append(res, dam.url())
+		}
+		ams.mtx.RUnlock()
+	}
+
+	return res
+}
+
+// sendAll sends the alerts to all configured Alertmanagers concurrently.
+// It returns true only if every Alertmanager set that was attempted (i.e. had
+// configured endpoints and alerts left after per-set relabeling) received the
+// alerts successfully on at least one of its endpoints.
+func (n *Manager) sendAll(alerts ...*Alert) bool {
+	if len(alerts) == 0 {
+		return true
+	}
+
+	begin := time.Now()
+
+	// cachedPayload represents 'alerts' marshaled for Alertmanager API v2.
+	// Marshaling happens below; the variable is declared here so the payload
+	// can be cached across loop iterations.
+	var cachedPayload []byte
+
+	n.mtx.RLock()
+	amSets := n.alertmanagers
+	n.mtx.RUnlock()
+
+	var (
+		wg           sync.WaitGroup
+		amSetCovered sync.Map
+	)
+	for k, ams := range amSets {
+		var (
+			payload  []byte
+			err      error
+			amAlerts = alerts
+		)
+
+		ams.mtx.RLock()
+
+		if len(ams.ams) == 0 {
+			ams.mtx.RUnlock()
+			continue
+		}
+
+		if len(ams.cfg.AlertRelabelConfigs) > 0 {
+			amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts)
+			if len(amAlerts) == 0 {
+				ams.mtx.RUnlock()
+				continue
+			}
+			// We can't use the cached values from previous iteration.
+			cachedPayload = nil
+		}
+
+		switch ams.cfg.APIVersion {
+		case config.AlertmanagerAPIVersionV2:
+			{
+				if cachedPayload == nil {
+					openAPIAlerts := alertsToOpenAPIAlerts(amAlerts)
+
+					cachedPayload, err = json.Marshal(openAPIAlerts)
+					if err != nil {
+						n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err)
+						ams.mtx.RUnlock()
+						return false
+					}
+				}
+
+				payload = cachedPayload
+			}
+		default:
+			{
+				n.logger.Error(
+					fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions),
+					"err", err,
+				)
+				ams.mtx.RUnlock()
+				return false
+			}
+		}
+
+		if len(ams.cfg.AlertRelabelConfigs) > 0 {
+			// We can't use the cached values on the next iteration.
+ cachedPayload = nil + } + + // Being here means len(ams.ams) > 0 + amSetCovered.Store(k, false) + for _, am := range ams.ams { + wg.Add(1) + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout)) + defer cancel() + + go func(ctx context.Context, k string, client *http.Client, url string, payload []byte, count int) { + err := n.sendOne(ctx, client, url, payload) + if err != nil { + n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err) + n.metrics.errors.WithLabelValues(url).Add(float64(count)) + } else { + amSetCovered.CompareAndSwap(k, false, true) + } + + n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds()) + n.metrics.sent.WithLabelValues(url).Add(float64(count)) + + wg.Done() + }(ctx, k, ams.client, am.url().String(), payload, len(amAlerts)) + } + + ams.mtx.RUnlock() + } + + wg.Wait() + + // Return false if there are any sets which were attempted (e.g. not filtered + // out) but have no successes. + allAmSetsCovered := true + amSetCovered.Range(func(_, value any) bool { + if !value.(bool) { + allAmSetsCovered = false + return false + } + return true + }) + + return allAmSetsCovered +} + +func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error { + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b)) + if err != nil { + return err + } + req.Header.Set("User-Agent", userAgent) + req.Header.Set("Content-Type", contentTypeJSON) + resp, err := n.opts.Do(ctx, c, req) + if err != nil { + return err + } + defer func() { + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() + + // Any HTTP status 2xx is OK. + if resp.StatusCode/100 != 2 { + return fmt.Errorf("bad response status %s", resp.Status) + } + + return nil +} + +// Stop signals the notification manager to shut down and immediately returns. +// +// Run will return once the notification manager has successfully shut down. +// +// The manager will optionally drain any queued notifications before shutting down. +// +// Stop is safe to call multiple times. 
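+//
+// A typical lifecycle, sketched here for illustration only (the discovery
+// manager and logger are assumed to exist in the caller):
+//
+//	n := NewManager(&Options{QueueCapacity: 10000, DrainOnShutdown: true}, logger)
+//	go n.Run(discoveryManager.SyncCh())
+//	// ... queue alerts via n.Send(...) ...
+//	n.Stop() // Run returns once shutdown (and optional draining) completes.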
+func (n *Manager) Stop() { + n.logger.Info("Stopping notification manager...") + + n.stopOnce.Do(func() { + close(n.stopRequested) + }) +} diff --git a/notifier/notifier_test.go b/notifier/manager_test.go similarity index 82% rename from notifier/notifier_test.go rename to notifier/manager_test.go index 2cdaa9e06d..fd553ad191 100644 --- a/notifier/notifier_test.go +++ b/notifier/manager_test.go @@ -26,52 +26,23 @@ import ( "testing" "time" - "github.com/go-kit/log" - "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + _ "github.com/prometheus/prometheus/discovery/file" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" ) -func TestPostPath(t *testing.T) { - cases := []struct { - in, out string - }{ - { - in: "", - out: "/api/v1/alerts", - }, - { - in: "/", - out: "/api/v1/alerts", - }, - { - in: "/prefix", - out: "/prefix/api/v1/alerts", - }, - { - in: "/prefix//", - out: "/prefix/api/v1/alerts", - }, - { - in: "prefix//", - out: "/prefix/api/v1/alerts", - }, - } - for _, c := range cases { - require.Equal(t, c.out, postPath(c.in, config.AlertmanagerAPIVersionV1)) - } -} +const maxBatchSize = 256 func TestHandlerNextBatch(t *testing.T) { h := NewManager(&Options{}, nil) @@ -139,17 +110,20 @@ func newTestHTTPServerBuilder(expected *[]*Alert, errc chan<- error, u, p string func TestHandlerSendAll(t *testing.T) { var ( - errc = make(chan error, 1) - expected = make([]*Alert, 0, maxBatchSize) - status1, status2 atomic.Int32 + errc = make(chan error, 1) + expected = make([]*Alert, 0, maxBatchSize) + status1, status2, status3 atomic.Int32 ) status1.Store(int32(http.StatusOK)) status2.Store(int32(http.StatusOK)) + status3.Store(int32(http.StatusOK)) server1 := newTestHTTPServerBuilder(&expected, errc, "prometheus", "testing_password", &status1) server2 := newTestHTTPServerBuilder(&expected, errc, "", "", &status2) + server3 := newTestHTTPServerBuilder(&expected, errc, "", "", &status3) defer server1.Close() defer server2.Close() + defer server3.Close() h := NewManager(&Options{}, nil) @@ -169,6 +143,9 @@ func TestHandlerSendAll(t *testing.T) { am2Cfg := config.DefaultAlertmanagerConfig am2Cfg.Timeout = model.Duration(time.Second) + am3Cfg := config.DefaultAlertmanagerConfig + am3Cfg.Timeout = model.Duration(time.Second) + h.alertmanagers["1"] = &alertmanagerSet{ ams: []alertmanager{ alertmanagerMock{ @@ -184,10 +161,18 @@ func TestHandlerSendAll(t *testing.T) { alertmanagerMock{ urlf: func() string { return server2.URL }, }, + alertmanagerMock{ + urlf: func() string { return server3.URL }, + }, }, cfg: &am2Cfg, } + h.alertmanagers["3"] = &alertmanagerSet{ + ams: []alertmanager{}, // empty set + cfg: &am3Cfg, + } + for i := range make([]struct{}, maxBatchSize) { h.queue = append(h.queue, &Alert{ Labels: labels.FromStrings("alertname", strconv.Itoa(i)), @@ -206,14 +191,25 @@ func TestHandlerSendAll(t *testing.T) { } } + // all ams in all sets are up require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly") checkNoErr() + // the only am in set 1 is down status1.Store(int32(http.StatusNotFound)) - 
require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+	require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
 	checkNoErr()
 
+	// reset it
+	status1.Store(int32(http.StatusOK))
+
+	// only one of the ams in set 2 is down
 	status2.Store(int32(http.StatusInternalServerError))
+	require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+	checkNoErr()
+
+	// both ams in set 2 are down
+	status3.Store(int32(http.StatusInternalServerError))
 	require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
 	checkNoErr()
 }
@@ -225,13 +221,15 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
 		expected2 = make([]*Alert, 0, maxBatchSize)
 		expected3 = make([]*Alert, 0)
 
-		statusOK atomic.Int32
+		status1, status2, status3 atomic.Int32
 	)
-	statusOK.Store(int32(http.StatusOK))
+	status1.Store(int32(http.StatusOK))
+	status2.Store(int32(http.StatusOK))
+	status3.Store(int32(http.StatusOK))
 
-	server1 := newTestHTTPServerBuilder(&expected1, errc, "", "", &statusOK)
-	server2 := newTestHTTPServerBuilder(&expected2, errc, "", "", &statusOK)
-	server3 := newTestHTTPServerBuilder(&expected3, errc, "", "", &statusOK)
+	server1 := newTestHTTPServerBuilder(&expected1, errc, "", "", &status1)
+	server2 := newTestHTTPServerBuilder(&expected2, errc, "", "", &status2)
+	server3 := newTestHTTPServerBuilder(&expected3, errc, "", "", &status3)
 
 	defer server1.Close()
 	defer server2.Close()
@@ -330,6 +328,21 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
 		}
 	}
 
+	// all ams are up
+	require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
+	checkNoErr()
+
+	// the only am in set 1 goes down
+	status1.Store(int32(http.StatusInternalServerError))
+	require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
+	checkNoErr()
+
+	// reset set 1
+	status1.Store(int32(http.StatusOK))
+
+	// set 3 loses its only am, but all alerts were dropped
+	// so there was nothing to send, keeping sendAll true
+	status3.Store(int32(http.StatusInternalServerError))
 	require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
 	checkNoErr()
 
@@ -347,7 +360,7 @@ func TestCustomDo(t *testing.T) {
 	var received bool
 	h := NewManager(&Options{
-		Do: func(_ context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+		Do: func(_ context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
 			received = true
 
 			body, err := io.ReadAll(req.Body)
@@ -371,6 +384,7 @@ func TestCustomDo(t *testing.T) {
 func TestExternalLabels(t *testing.T) {
 	h := NewManager(&Options{
 		QueueCapacity:  3 * maxBatchSize,
+		MaxBatchSize:   maxBatchSize,
 		ExternalLabels: labels.FromStrings("a", "b"),
 		RelabelConfigs: []*relabel.Config{
 			{
@@ -405,6 +419,7 @@ func TestExternalLabels(t *testing.T) {
 func TestHandlerRelabel(t *testing.T) {
 	h := NewManager(&Options{
 		QueueCapacity: 3 * maxBatchSize,
+		MaxBatchSize:  maxBatchSize,
 		RelabelConfigs: []*relabel.Config{
 			{
 				SourceLabels: model.LabelNames{"alertname"},
@@ -446,7 +461,7 @@ func TestHandlerQueuing(t *testing.T) {
 		errc   = make(chan error, 1)
 	)
 
-	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	server := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 		// Notify the test function that we have received something.
select { case called <- struct{}{}: @@ -483,6 +498,7 @@ func TestHandlerQueuing(t *testing.T) { h := NewManager( &Options{ QueueCapacity: 3 * maxBatchSize, + MaxBatchSize: maxBatchSize, }, nil, ) @@ -573,16 +589,6 @@ func (a alertmanagerMock) url() *url.URL { return u } -func TestLabelSetNotReused(t *testing.T) { - tg := makeInputTargetGroup() - _, _, err := AlertmanagerFromGroup(tg, &config.AlertmanagerConfig{}) - - require.NoError(t, err) - - // Target modified during alertmanager extraction - require.Equal(t, tg, makeInputTargetGroup()) -} - func TestReload(t *testing.T) { tests := []struct { in *targetgroup.Group @@ -697,10 +703,6 @@ func makeInputTargetGroup() *targetgroup.Group { } } -func TestLabelsToOpenAPILabelSet(t *testing.T) { - require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222"))) -} - // TestHangingNotifier ensures that the notifier takes into account SD changes even when there are // queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676. // and https://github.com/prometheus/prometheus/issues/8768. @@ -711,7 +713,7 @@ func TestHangingNotifier(t *testing.T) { ) var ( - sendTimeout = 10 * time.Millisecond + sendTimeout = 100 * time.Millisecond sdUpdatert = sendTimeout / 2 done = make(chan struct{}) @@ -723,7 +725,7 @@ func TestHangingNotifier(t *testing.T) { // Set up a faulty Alertmanager. var faultyCalled atomic.Bool - faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + faultyServer := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { faultyCalled.Store(true) select { case <-done: @@ -735,7 +737,7 @@ func TestHangingNotifier(t *testing.T) { // Set up a functional Alertmanager. var functionalCalled atomic.Bool - functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + functionalServer := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { functionalCalled.Store(true) })) functionalURL, err := url.Parse(functionalServer.URL) @@ -743,7 +745,7 @@ func TestHangingNotifier(t *testing.T) { // Initialize the discovery manager // This is relevant as the updates aren't sent continually in real life, but only each updatert. - // The old implementation of TestHangingNotifier didn't take that into acount. + // The old implementation of TestHangingNotifier didn't take that into account. ctx, cancel := context.WithCancel(context.Background()) defer cancel() reg := prometheus.NewRegistry() @@ -751,7 +753,7 @@ func TestHangingNotifier(t *testing.T) { require.NoError(t, err) sdManager := discovery.NewManager( ctx, - log.NewNopLogger(), + promslog.NewNopLogger(), reg, sdMetrics, discovery.Name("sd-manager"), @@ -1017,3 +1019,107 @@ func TestStop_DrainingEnabled(t *testing.T) { require.Equal(t, int64(2), alertsReceived.Load()) } + +func TestApplyConfig(t *testing.T) { + targetURL := "alertmanager:9093" + targetGroup := &targetgroup.Group{ + Targets: []model.LabelSet{ + { + "__address__": model.LabelValue(targetURL), + }, + }, + } + alertmanagerURL := fmt.Sprintf("http://%s/api/v2/alerts", targetURL) + + n := NewManager(&Options{}, nil) + cfg := &config.Config{} + s := ` +alerting: + alertmanagers: + - file_sd_configs: + - files: + - foo.json +` + // 1. Ensure known alertmanagers are not dropped during ApplyConfig. 
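+	// "Known" is keyed on each set's configHash, so an unchanged
+	// AlertmanagerConfig keeps its discovered endpoints across reloads.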
+ require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) + + // First, apply the config and reload. + require.NoError(t, n.ApplyConfig(cfg)) + tgs := map[string][]*targetgroup.Group{"config-0": {targetGroup}} + n.reload(tgs) + require.Len(t, n.Alertmanagers(), 1) + require.Equal(t, alertmanagerURL, n.Alertmanagers()[0].String()) + + // Reapply the config. + require.NoError(t, n.ApplyConfig(cfg)) + // Ensure the known alertmanagers are not dropped. + require.Len(t, n.Alertmanagers(), 1) + require.Equal(t, alertmanagerURL, n.Alertmanagers()[0].String()) + + // 2. Ensure known alertmanagers are not dropped during ApplyConfig even when + // the config order changes. + s = ` +alerting: + alertmanagers: + - static_configs: + - file_sd_configs: + - files: + - foo.json +` + require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 2) + + require.NoError(t, n.ApplyConfig(cfg)) + require.Len(t, n.Alertmanagers(), 1) + // Ensure no unnecessary alertmanagers are injected. + require.Empty(t, n.alertmanagers["config-0"].ams) + // Ensure the config order is taken into account. + ams := n.alertmanagers["config-1"].ams + require.Len(t, ams, 1) + require.Equal(t, alertmanagerURL, ams[0].url().String()) + + // 3. Ensure known alertmanagers are reused for new config with identical AlertmanagerConfig. + s = ` +alerting: + alertmanagers: + - file_sd_configs: + - files: + - foo.json + - file_sd_configs: + - files: + - foo.json +` + require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 2) + + require.NoError(t, n.ApplyConfig(cfg)) + require.Len(t, n.Alertmanagers(), 2) + for cfgIdx := range 2 { + ams := n.alertmanagers[fmt.Sprintf("config-%d", cfgIdx)].ams + require.Len(t, ams, 1) + require.Equal(t, alertmanagerURL, ams[0].url().String()) + } + + // 4. Ensure known alertmanagers are reused only for identical AlertmanagerConfig. + s = ` +alerting: + alertmanagers: + - file_sd_configs: + - files: + - foo.json + path_prefix: /bar + - file_sd_configs: + - files: + - foo.json + relabel_configs: + - source_labels: ['__address__'] + regex: 'doesntmatter:1234' + action: drop +` + require.NoError(t, yaml.UnmarshalStrict([]byte(s), cfg)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 2) + + require.NoError(t, n.ApplyConfig(cfg)) + require.Empty(t, n.Alertmanagers()) +} diff --git a/notifier/metric.go b/notifier/metric.go new file mode 100644 index 0000000000..b9a55b3ec7 --- /dev/null +++ b/notifier/metric.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package notifier + +import "github.com/prometheus/client_golang/prometheus" + +type alertMetrics struct { + latency *prometheus.SummaryVec + errors *prometheus.CounterVec + sent *prometheus.CounterVec + dropped prometheus.Counter + queueLength prometheus.GaugeFunc + queueCapacity prometheus.Gauge + alertmanagersDiscovered prometheus.GaugeFunc +} + +func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanagersDiscovered func() float64) *alertMetrics { + m := &alertMetrics{ + latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "latency_seconds", + Help: "Latency quantiles for sending alert notifications.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{alertmanagerLabel}, + ), + errors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "errors_total", + Help: "Total number of sent alerts affected by errors.", + }, + []string{alertmanagerLabel}, + ), + sent: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "sent_total", + Help: "Total number of alerts sent.", + }, + []string{alertmanagerLabel}, + ), + dropped: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dropped_total", + Help: "Total number of alerts dropped due to errors when sending to Alertmanager.", + }), + queueLength: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queue_length", + Help: "The number of alert notifications in the queue.", + }, queueLen), + queueCapacity: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queue_capacity", + Help: "The capacity of the alert notifications queue.", + }), + alertmanagersDiscovered: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_notifications_alertmanagers_discovered", + Help: "The number of alertmanagers discovered and active.", + }, alertmanagersDiscovered), + } + + m.queueCapacity.Set(float64(queueCap)) + + if r != nil { + r.MustRegister( + m.latency, + m.errors, + m.sent, + m.dropped, + m.queueLength, + m.queueCapacity, + m.alertmanagersDiscovered, + ) + } + + return m +} diff --git a/notifier/notifier.go b/notifier/notifier.go deleted file mode 100644 index 68b0d4961e..0000000000 --- a/notifier/notifier.go +++ /dev/null @@ -1,839 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package notifier - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/go-openapi/strfmt" - "github.com/prometheus/alertmanager/api/v2/models" - "github.com/prometheus/client_golang/prometheus" - config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" - "github.com/prometheus/common/sigv4" - "github.com/prometheus/common/version" - "go.uber.org/atomic" - - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/relabel" -) - -const ( - contentTypeJSON = "application/json" -) - -// String constants for instrumentation. -const ( - namespace = "prometheus" - subsystem = "notifications" - alertmanagerLabel = "alertmanager" -) - -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels labels.Labels `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations labels.Labels `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL,omitempty"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return a.Labels.Get(labels.AlertName) -} - -// Hash returns a hash over the alert. It is equivalent to the alert labels hash. -func (a *Alert) Hash() uint64 { - return a.Labels.Hash() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), fmt.Sprintf("%016x", a.Hash())[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true iff the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Manager is responsible for dispatching alert notifications to an -// alert manager service. -type Manager struct { - queue []*Alert - opts *Options - - metrics *alertMetrics - - more chan struct{} - mtx sync.RWMutex - - stopOnce *sync.Once - stopRequested chan struct{} - - alertmanagers map[string]*alertmanagerSet - logger log.Logger -} - -// Options are the configurable parameters of a Handler. -type Options struct { - QueueCapacity int - DrainOnShutdown bool - ExternalLabels labels.Labels - RelabelConfigs []*relabel.Config - // Used for sending HTTP requests to the Alertmanager. 
- Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) - - Registerer prometheus.Registerer -} - -type alertMetrics struct { - latency *prometheus.SummaryVec - errors *prometheus.CounterVec - sent *prometheus.CounterVec - dropped prometheus.Counter - queueLength prometheus.GaugeFunc - queueCapacity prometheus.Gauge - alertmanagersDiscovered prometheus.GaugeFunc -} - -func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanagersDiscovered func() float64) *alertMetrics { - m := &alertMetrics{ - latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "latency_seconds", - Help: "Latency quantiles for sending alert notifications.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{alertmanagerLabel}, - ), - errors: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "errors_total", - Help: "Total number of errors sending alert notifications.", - }, - []string{alertmanagerLabel}, - ), - sent: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "sent_total", - Help: "Total number of alerts sent.", - }, - []string{alertmanagerLabel}, - ), - dropped: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "dropped_total", - Help: "Total number of alerts dropped due to errors when sending to Alertmanager.", - }), - queueLength: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queue_length", - Help: "The number of alert notifications in the queue.", - }, queueLen), - queueCapacity: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queue_capacity", - Help: "The capacity of the alert notifications queue.", - }), - alertmanagersDiscovered: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Name: "prometheus_notifications_alertmanagers_discovered", - Help: "The number of alertmanagers discovered and active.", - }, alertmanagersDiscovered), - } - - m.queueCapacity.Set(float64(queueCap)) - - if r != nil { - r.MustRegister( - m.latency, - m.errors, - m.sent, - m.dropped, - m.queueLength, - m.queueCapacity, - m.alertmanagersDiscovered, - ) - } - - return m -} - -func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - return client.Do(req.WithContext(ctx)) -} - -// NewManager is the manager constructor. -func NewManager(o *Options, logger log.Logger) *Manager { - if o.Do == nil { - o.Do = do - } - if logger == nil { - logger = log.NewNopLogger() - } - - n := &Manager{ - queue: make([]*Alert, 0, o.QueueCapacity), - more: make(chan struct{}, 1), - stopRequested: make(chan struct{}), - stopOnce: &sync.Once{}, - opts: o, - logger: logger, - } - - queueLenFunc := func() float64 { return float64(n.queueLen()) } - alertmanagersDiscoveredFunc := func() float64 { return float64(len(n.Alertmanagers())) } - - n.metrics = newAlertMetrics( - o.Registerer, - o.QueueCapacity, - queueLenFunc, - alertmanagersDiscoveredFunc, - ) - - return n -} - -// ApplyConfig updates the status state as the new config requires. 
-func (n *Manager) ApplyConfig(conf *config.Config) error { - n.mtx.Lock() - defer n.mtx.Unlock() - - n.opts.ExternalLabels = conf.GlobalConfig.ExternalLabels - n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs - - amSets := make(map[string]*alertmanagerSet) - - for k, cfg := range conf.AlertingConfig.AlertmanagerConfigs.ToMap() { - ams, err := newAlertmanagerSet(cfg, n.logger, n.metrics) - if err != nil { - return err - } - - amSets[k] = ams - } - - n.alertmanagers = amSets - - return nil -} - -const maxBatchSize = 64 - -func (n *Manager) queueLen() int { - n.mtx.RLock() - defer n.mtx.RUnlock() - - return len(n.queue) -} - -func (n *Manager) nextBatch() []*Alert { - n.mtx.Lock() - defer n.mtx.Unlock() - - var alerts []*Alert - - if len(n.queue) > maxBatchSize { - alerts = append(make([]*Alert, 0, maxBatchSize), n.queue[:maxBatchSize]...) - n.queue = n.queue[maxBatchSize:] - } else { - alerts = append(make([]*Alert, 0, len(n.queue)), n.queue...) - n.queue = n.queue[:0] - } - - return alerts -} - -// Run dispatches notifications continuously, returning once Stop has been called and all -// pending notifications have been drained from the queue (if draining is enabled). -// -// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other. -// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details. -func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { - wg := sync.WaitGroup{} - wg.Add(2) - - go func() { - defer wg.Done() - n.targetUpdateLoop(tsets) - }() - - go func() { - defer wg.Done() - n.sendLoop() - n.drainQueue() - }() - - wg.Wait() - level.Info(n.logger).Log("msg", "Notification manager stopped") -} - -// sendLoop continuously consumes the notifications queue and sends alerts to -// the configured Alertmanagers. -func (n *Manager) sendLoop() { - for { - // If we've been asked to stop, that takes priority over sending any further notifications. - select { - case <-n.stopRequested: - return - default: - select { - case <-n.stopRequested: - return - - case <-n.more: - n.sendOneBatch() - - // If the queue still has items left, kick off the next iteration. - if n.queueLen() > 0 { - n.setMore() - } - } - } - } -} - -// targetUpdateLoop receives updates of target groups and triggers a reload. -func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) { - for { - // If we've been asked to stop, that takes priority over processing any further target group updates. - select { - case <-n.stopRequested: - return - default: - select { - case <-n.stopRequested: - return - case ts := <-tsets: - n.reload(ts) - } - } - } -} - -func (n *Manager) sendOneBatch() { - alerts := n.nextBatch() - - if !n.sendAll(alerts...) 
{ - n.metrics.dropped.Add(float64(len(alerts))) - } -} - -func (n *Manager) drainQueue() { - if !n.opts.DrainOnShutdown { - if n.queueLen() > 0 { - level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) - n.metrics.dropped.Add(float64(n.queueLen())) - } - - return - } - - level.Info(n.logger).Log("msg", "Draining any remaining notifications...") - - for n.queueLen() > 0 { - n.sendOneBatch() - } - - level.Info(n.logger).Log("msg", "Remaining notifications drained") -} - -func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { - n.mtx.Lock() - defer n.mtx.Unlock() - - for id, tgroup := range tgs { - am, ok := n.alertmanagers[id] - if !ok { - level.Error(n.logger).Log("msg", "couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) - continue - } - am.sync(tgroup) - } -} - -// Send queues the given notification requests for processing. -// Panics if called on a handler that is not running. -func (n *Manager) Send(alerts ...*Alert) { - n.mtx.Lock() - defer n.mtx.Unlock() - - alerts = relabelAlerts(n.opts.RelabelConfigs, n.opts.ExternalLabels, alerts) - if len(alerts) == 0 { - return - } - - // Queue capacity should be significantly larger than a single alert - // batch could be. - if d := len(alerts) - n.opts.QueueCapacity; d > 0 { - alerts = alerts[d:] - - level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) - n.metrics.dropped.Add(float64(d)) - } - - // If the queue is full, remove the oldest alerts in favor - // of newer ones. - if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 { - n.queue = n.queue[d:] - - level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d) - n.metrics.dropped.Add(float64(d)) - } - n.queue = append(n.queue, alerts...) - - // Notify sending goroutine that there are alerts to be processed. - n.setMore() -} - -func relabelAlerts(relabelConfigs []*relabel.Config, externalLabels labels.Labels, alerts []*Alert) []*Alert { - lb := labels.NewBuilder(labels.EmptyLabels()) - var relabeledAlerts []*Alert - - for _, a := range alerts { - lb.Reset(a.Labels) - externalLabels.Range(func(l labels.Label) { - if a.Labels.Get(l.Name) == "" { - lb.Set(l.Name, l.Value) - } - }) - - keep := relabel.ProcessBuilder(lb, relabelConfigs...) - if !keep { - continue - } - a.Labels = lb.Labels() - relabeledAlerts = append(relabeledAlerts, a) - } - return relabeledAlerts -} - -// setMore signals that the alert queue has items. -func (n *Manager) setMore() { - // If we cannot send on the channel, it means the signal already exists - // and has not been consumed yet. - select { - case n.more <- struct{}{}: - default: - } -} - -// Alertmanagers returns a slice of Alertmanager URLs. -func (n *Manager) Alertmanagers() []*url.URL { - n.mtx.RLock() - amSets := n.alertmanagers - n.mtx.RUnlock() - - var res []*url.URL - - for _, ams := range amSets { - ams.mtx.RLock() - for _, am := range ams.ams { - res = append(res, am.url()) - } - ams.mtx.RUnlock() - } - - return res -} - -// DroppedAlertmanagers returns a slice of Alertmanager URLs. 
-func (n *Manager) DroppedAlertmanagers() []*url.URL { - n.mtx.RLock() - amSets := n.alertmanagers - n.mtx.RUnlock() - - var res []*url.URL - - for _, ams := range amSets { - ams.mtx.RLock() - for _, dam := range ams.droppedAms { - res = append(res, dam.url()) - } - ams.mtx.RUnlock() - } - - return res -} - -// sendAll sends the alerts to all configured Alertmanagers concurrently. -// It returns true if the alerts could be sent successfully to at least one Alertmanager. -func (n *Manager) sendAll(alerts ...*Alert) bool { - if len(alerts) == 0 { - return true - } - - begin := time.Now() - - // v1Payload and v2Payload represent 'alerts' marshaled for Alertmanager API - // v1 or v2. Marshaling happens below. Reference here is for caching between - // for loop iterations. - var v1Payload, v2Payload []byte - - n.mtx.RLock() - amSets := n.alertmanagers - n.mtx.RUnlock() - - var ( - wg sync.WaitGroup - numSuccess atomic.Uint64 - ) - for _, ams := range amSets { - var ( - payload []byte - err error - amAlerts = alerts - ) - - ams.mtx.RLock() - - if len(ams.ams) == 0 { - ams.mtx.RUnlock() - continue - } - - if len(ams.cfg.AlertRelabelConfigs) > 0 { - amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts) - if len(amAlerts) == 0 { - ams.mtx.RUnlock() - continue - } - // We can't use the cached values from previous iteration. - v1Payload, v2Payload = nil, nil - } - - switch ams.cfg.APIVersion { - case config.AlertmanagerAPIVersionV1: - { - if v1Payload == nil { - v1Payload, err = json.Marshal(amAlerts) - if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err) - ams.mtx.RUnlock() - return false - } - } - - payload = v1Payload - } - case config.AlertmanagerAPIVersionV2: - { - if v2Payload == nil { - openAPIAlerts := alertsToOpenAPIAlerts(amAlerts) - - v2Payload, err = json.Marshal(openAPIAlerts) - if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err) - ams.mtx.RUnlock() - return false - } - } - - payload = v2Payload - } - default: - { - level.Error(n.logger).Log( - "msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), - "err", err, - ) - ams.mtx.RUnlock() - return false - } - } - - if len(ams.cfg.AlertRelabelConfigs) > 0 { - // We can't use the cached values on the next iteration. 
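// Note: v1Payload/v2Payload cache the marshaled alerts across Alertmanager
// sets; a set with its own AlertRelabelConfigs sends a different alert slice,
// so the cache is invalidated both before marshaling for such a set (above)
// and here again for the next iteration.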
- v1Payload, v2Payload = nil, nil - } - - for _, am := range ams.ams { - wg.Add(1) - - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout)) - defer cancel() - - go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { - if err := n.sendOne(ctx, client, url, payload); err != nil { - level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err) - n.metrics.errors.WithLabelValues(url).Inc() - } else { - numSuccess.Inc() - } - n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds()) - n.metrics.sent.WithLabelValues(url).Add(float64(len(amAlerts))) - - wg.Done() - }(ctx, ams.client, am.url().String(), payload, len(amAlerts)) - } - - ams.mtx.RUnlock() - } - - wg.Wait() - - return numSuccess.Load() > 0 -} - -func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { - openAPIAlerts := models.PostableAlerts{} - for _, a := range alerts { - start := strfmt.DateTime(a.StartsAt) - end := strfmt.DateTime(a.EndsAt) - openAPIAlerts = append(openAPIAlerts, &models.PostableAlert{ - Annotations: labelsToOpenAPILabelSet(a.Annotations), - EndsAt: end, - StartsAt: start, - Alert: models.Alert{ - GeneratorURL: strfmt.URI(a.GeneratorURL), - Labels: labelsToOpenAPILabelSet(a.Labels), - }, - }) - } - - return openAPIAlerts -} - -func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { - apiLabelSet := models.LabelSet{} - modelLabelSet.Range(func(label labels.Label) { - apiLabelSet[label.Name] = label.Value - }) - - return apiLabelSet -} - -func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error { - req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b)) - if err != nil { - return err - } - req.Header.Set("User-Agent", userAgent) - req.Header.Set("Content-Type", contentTypeJSON) - resp, err := n.opts.Do(ctx, c, req) - if err != nil { - return err - } - defer func() { - io.Copy(io.Discard, resp.Body) - resp.Body.Close() - }() - - // Any HTTP status 2xx is OK. - //nolint:usestdlibvars - if resp.StatusCode/100 != 2 { - return fmt.Errorf("bad response status %s", resp.Status) - } - - return nil -} - -// Stop signals the notification manager to shut down and immediately returns. -// -// Run will return once the notification manager has successfully shut down. -// -// The manager will optionally drain any queued notifications before shutting down. -// -// Stop is safe to call multiple times. -func (n *Manager) Stop() { - level.Info(n.logger).Log("msg", "Stopping notification manager...") - - n.stopOnce.Do(func() { - close(n.stopRequested) - }) -} - -// Alertmanager holds Alertmanager endpoint information. -type alertmanager interface { - url() *url.URL -} - -type alertmanagerLabels struct{ labels.Labels } - -const pathLabel = "__alerts_path__" - -func (a alertmanagerLabels) url() *url.URL { - return &url.URL{ - Scheme: a.Get(model.SchemeLabel), - Host: a.Get(model.AddressLabel), - Path: a.Get(pathLabel), - } -} - -// alertmanagerSet contains a set of Alertmanagers discovered via a group of service -// discovery definitions that have a common configuration on how alerts should be sent. 
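// Note: as a worked example with illustrative values, a discovered target
// carrying __scheme__="http", __address__="1.2.3.4:9093" and
// __alerts_path__="/api/v2/alerts" resolves via url() to
// http://1.2.3.4:9093/api/v2/alerts.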
-type alertmanagerSet struct { - cfg *config.AlertmanagerConfig - client *http.Client - - metrics *alertMetrics - - mtx sync.RWMutex - ams []alertmanager - droppedAms []alertmanager - logger log.Logger -} - -func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { - client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") - if err != nil { - return nil, err - } - t := client.Transport - - if cfg.SigV4Config != nil { - t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport) - if err != nil { - return nil, err - } - } - - client.Transport = t - - s := &alertmanagerSet{ - client: client, - cfg: cfg, - logger: logger, - metrics: metrics, - } - return s, nil -} - -// sync extracts a deduplicated set of Alertmanager endpoints from a list -// of target groups definitions. -func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { - allAms := []alertmanager{} - allDroppedAms := []alertmanager{} - - for _, tg := range tgs { - ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) - if err != nil { - level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) - continue - } - allAms = append(allAms, ams...) - allDroppedAms = append(allDroppedAms, droppedAms...) - } - - s.mtx.Lock() - defer s.mtx.Unlock() - // Set new Alertmanagers and deduplicate them along their unique URL. - s.ams = []alertmanager{} - s.droppedAms = []alertmanager{} - s.droppedAms = append(s.droppedAms, allDroppedAms...) - seen := map[string]struct{}{} - - for _, am := range allAms { - us := am.url().String() - if _, ok := seen[us]; ok { - continue - } - - // This will initialize the Counters for the AM to 0. - s.metrics.sent.WithLabelValues(us) - s.metrics.errors.WithLabelValues(us) - - seen[us] = struct{}{} - s.ams = append(s.ams, am) - } -} - -func postPath(pre string, v config.AlertmanagerAPIVersion) string { - alertPushEndpoint := fmt.Sprintf("/api/%v/alerts", string(v)) - return path.Join("/", pre, alertPushEndpoint) -} - -// AlertmanagerFromGroup extracts a list of alertmanagers from a target group -// and an associated AlertmanagerConfig. -func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { - var res []alertmanager - var droppedAlertManagers []alertmanager - lb := labels.NewBuilder(labels.EmptyLabels()) - - for _, tlset := range tg.Targets { - lb.Reset(labels.EmptyLabels()) - - for ln, lv := range tlset { - lb.Set(string(ln), string(lv)) - } - // Set configured scheme as the initial scheme label for overwrite. - lb.Set(model.SchemeLabel, cfg.Scheme) - lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) - - // Combine target labels with target group labels. - for ln, lv := range tg.Labels { - if _, ok := tlset[ln]; !ok { - lb.Set(string(ln), string(lv)) - } - } - - preRelabel := lb.Labels() - keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) 
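// Note: relabel.ProcessBuilder mutates the builder in place and returns
// false when a rule drops the target; the pre-relabel label set is captured
// above so dropped endpoints can still be surfaced via DroppedAlertmanagers.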
- if !keep { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) - continue - } - - addr := lb.Get(model.AddressLabel) - if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { - return nil, nil, err - } - - res = append(res, alertmanagerLabels{lb.Labels()}) - } - return res, droppedAlertManagers, nil -} diff --git a/notifier/util.go b/notifier/util.go new file mode 100644 index 0000000000..c21c33a57b --- /dev/null +++ b/notifier/util.go @@ -0,0 +1,49 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "github.com/go-openapi/strfmt" + "github.com/prometheus/alertmanager/api/v2/models" + + "github.com/prometheus/prometheus/model/labels" +) + +func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { + openAPIAlerts := models.PostableAlerts{} + for _, a := range alerts { + start := strfmt.DateTime(a.StartsAt) + end := strfmt.DateTime(a.EndsAt) + openAPIAlerts = append(openAPIAlerts, &models.PostableAlert{ + Annotations: labelsToOpenAPILabelSet(a.Annotations), + EndsAt: end, + StartsAt: start, + Alert: models.Alert{ + GeneratorURL: strfmt.URI(a.GeneratorURL), + Labels: labelsToOpenAPILabelSet(a.Labels), + }, + }) + } + + return openAPIAlerts +} + +func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { + apiLabelSet := models.LabelSet{} + modelLabelSet.Range(func(label labels.Label) { + apiLabelSet[label.Name] = label.Value + }) + + return apiLabelSet +} diff --git a/util/testutil/logging.go b/notifier/util_test.go similarity index 60% rename from util/testutil/logging.go rename to notifier/util_test.go index db096ea234..2c1c7d241b 100644 --- a/util/testutil/logging.go +++ b/notifier/util_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2013 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,25 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -package testutil +package notifier import ( "testing" - "github.com/go-kit/log" + "github.com/prometheus/alertmanager/api/v2/models" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" ) -type logger struct { - t *testing.T -} - -// NewLogger returns a gokit compatible Logger which calls t.Log. -func NewLogger(t *testing.T) log.Logger { - return logger{t: t} -} - -// Log implements log.Logger. -func (t logger) Log(keyvals ...interface{}) error { - t.t.Log(keyvals...) 
- return nil +func TestLabelsToOpenAPILabelSet(t *testing.T) { + require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222"))) } diff --git a/plugins.yml b/plugins.yml index c7b9d297d0..0541fe4852 100644 --- a/plugins.yml +++ b/plugins.yml @@ -16,6 +16,7 @@ - github.com/prometheus/prometheus/discovery/ovhcloud - github.com/prometheus/prometheus/discovery/puppetdb - github.com/prometheus/prometheus/discovery/scaleway +- github.com/prometheus/prometheus/discovery/stackit - github.com/prometheus/prometheus/discovery/triton - github.com/prometheus/prometheus/discovery/uyuni - github.com/prometheus/prometheus/discovery/vultr diff --git a/plugins/generate.go b/plugins/generate.go index 3db6de4ab0..09a1515a18 100644 --- a/plugins/generate.go +++ b/plugins/generate.go @@ -57,7 +57,7 @@ func main() { // See the License for the specific language governing permissions and // limitations under the License. -// This file is generated by "make plugins". +// Code generated by "make plugins". DO NOT EDIT. package plugins @@ -75,7 +75,7 @@ package plugins log.Fatal(err) } - for i, plugin := range plugins { + for _, plugin := range plugins { _, err = f.WriteString(fmt.Sprintf("\t// Register %s plugin.\n", path.Base(plugin))) if err != nil { log.Fatal(err) @@ -84,12 +84,6 @@ package plugins if err != nil { log.Fatal(err) } - if i < len(plugins)-1 { - _, err = f.WriteString("\n") - if err != nil { - log.Fatal(err) - } - } } _, err = f.WriteString(")\n") diff --git a/plugins/plugins.go b/plugins/plugins.go index e6994e2068..90b1407281 100644 --- a/plugins/plugins.go +++ b/plugins/plugins.go @@ -11,77 +11,57 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file is generated by "make plugins". +// Code generated by "make plugins". DO NOT EDIT. package plugins import ( // Register aws plugin. _ "github.com/prometheus/prometheus/discovery/aws" - // Register azure plugin. _ "github.com/prometheus/prometheus/discovery/azure" - // Register consul plugin. _ "github.com/prometheus/prometheus/discovery/consul" - // Register digitalocean plugin. _ "github.com/prometheus/prometheus/discovery/digitalocean" - // Register dns plugin. _ "github.com/prometheus/prometheus/discovery/dns" - // Register eureka plugin. _ "github.com/prometheus/prometheus/discovery/eureka" - // Register gce plugin. _ "github.com/prometheus/prometheus/discovery/gce" - // Register hetzner plugin. _ "github.com/prometheus/prometheus/discovery/hetzner" - // Register ionos plugin. _ "github.com/prometheus/prometheus/discovery/ionos" - // Register kubernetes plugin. _ "github.com/prometheus/prometheus/discovery/kubernetes" - // Register linode plugin. _ "github.com/prometheus/prometheus/discovery/linode" - // Register marathon plugin. _ "github.com/prometheus/prometheus/discovery/marathon" - // Register moby plugin. _ "github.com/prometheus/prometheus/discovery/moby" - // Register nomad plugin. _ "github.com/prometheus/prometheus/discovery/nomad" - // Register openstack plugin. _ "github.com/prometheus/prometheus/discovery/openstack" - // Register ovhcloud plugin. _ "github.com/prometheus/prometheus/discovery/ovhcloud" - // Register puppetdb plugin. _ "github.com/prometheus/prometheus/discovery/puppetdb" - // Register scaleway plugin. _ "github.com/prometheus/prometheus/discovery/scaleway" - + // Register stackit plugin. + _ "github.com/prometheus/prometheus/discovery/stackit" // Register triton plugin. 
_ "github.com/prometheus/prometheus/discovery/triton" - // Register uyuni plugin. _ "github.com/prometheus/prometheus/discovery/uyuni" - // Register vultr plugin. _ "github.com/prometheus/prometheus/discovery/vultr" - // Register xds plugin. _ "github.com/prometheus/prometheus/discovery/xds" - // Register zookeeper plugin. _ "github.com/prometheus/prometheus/discovery/zookeeper" ) diff --git a/prompb/buf.gen.yaml b/prompb/buf.gen.yaml new file mode 100644 index 0000000000..1fda309ea7 --- /dev/null +++ b/prompb/buf.gen.yaml @@ -0,0 +1,5 @@ +version: v2 +plugins: + - local: protoc-gen-gogofast + out: . + opt: [plugins=grpc, paths=source_relative, Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types] diff --git a/prompb/buf.lock b/prompb/buf.lock index 30b0f08479..f9907b4592 100644 --- a/prompb/buf.lock +++ b/prompb/buf.lock @@ -4,7 +4,5 @@ deps: - remote: buf.build owner: gogo repository: protobuf - branch: main - commit: 4df00b267f944190a229ce3695781e99 - digest: b1-sjLgsg7CzrkOrIjBDh3s-l0aMjE6oqTj85-OsoopKAw= - create_time: 2021-08-10T00:14:28.345069Z + commit: e1dbca2775a74a89955a99990de45a53 + digest: shake256:2523041b61927813260d369e632adb1938da2e9a0e10c42c6fca1b38acdb04661046bf20a2d99a7c9fb69676a63f9655147667dca8d49cea1644114fa97c0add diff --git a/prompb/codec.go b/prompb/codec.go index ad30cd5e7b..b2574fd9e1 100644 --- a/prompb/codec.go +++ b/prompb/codec.go @@ -90,6 +90,7 @@ func (h Histogram) ToIntHistogram() *histogram.Histogram { PositiveBuckets: h.GetPositiveDeltas(), NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), NegativeBuckets: h.GetNegativeDeltas(), + CustomValues: h.CustomValues, } } @@ -109,6 +110,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { PositiveBuckets: h.GetPositiveCounts(), NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), NegativeBuckets: h.GetNegativeCounts(), + CustomValues: h.CustomValues, } } // Conversion from integer histogram. diff --git a/prompb/io/prometheus/client/decoder.go b/prompb/io/prometheus/client/decoder.go new file mode 100644 index 0000000000..d4fb4204ca --- /dev/null +++ b/prompb/io/prometheus/client/decoder.go @@ -0,0 +1,782 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package io_prometheus_client //nolint:revive + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "unicode/utf8" + "unsafe" + + proto "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" +) + +type MetricStreamingDecoder struct { + in []byte + inPos int + + // TODO(bwplotka): Switch to generator/plugin that won't have those fields accessible e.g. OpaqueAPI + // We leverage the fact those two don't collide. + *MetricFamily // Without Metric, guarded by overridden GetMetric method. + *Metric // Without Label, guarded by overridden GetLabel method. 
+ + mfData []byte + metrics []pos + metricIndex int + + mData []byte + labels []pos +} + +// NewMetricStreamingDecoder returns a Go iterator that unmarshals the given protobuf bytes one +// metric family and one metric at a time, allowing efficient streaming. +// +// Do not modify MetricStreamingDecoder between iterations as it's reused to save allocations. +// GetGauge, GetCounter, etc. are also cached, which means GetGauge will still return a value for a counter +// if a gauge was parsed previously. It's up to the caller to use Type to decide which +// method to use when checking the value. +// +// TODO(bwplotka): An io.Reader approach is possible too, but textparse has access to the whole scrape for now. +func NewMetricStreamingDecoder(data []byte) *MetricStreamingDecoder { + return &MetricStreamingDecoder{ + in: data, + MetricFamily: &MetricFamily{}, + Metric: &Metric{}, + metrics: make([]pos, 0, 100), + } +} + +var errInvalidVarint = errors.New("clientpb: invalid varint encountered") + +func (m *MetricStreamingDecoder) NextMetricFamily() error { + b := m.in[m.inPos:] + if len(b) == 0 { + return io.EOF + } + messageLength, varIntLength := proto.DecodeVarint(b) // TODO(bwplotka): Get rid of gogo. + if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 { + return errInvalidVarint + } + totalLength := varIntLength + int(messageLength) + if totalLength > len(b) { + return fmt.Errorf("clientpb: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) + } + m.resetMetricFamily() + m.mfData = b[varIntLength:totalLength] + + m.inPos += totalLength + return m.unmarshalWithoutMetrics(m, m.mfData) +} + +// resetMetricFamily resets all the fields in m to equal the zero value, but reusing slice memory. +func (m *MetricStreamingDecoder) resetMetricFamily() { + m.metrics = m.metrics[:0] + m.metricIndex = 0 + m.MetricFamily.Reset() +} + +func (m *MetricStreamingDecoder) NextMetric() error { + if m.metricIndex >= len(m.metrics) { + return io.EOF + } + + m.resetMetric() + m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end] + if err := m.unmarshalWithoutLabels(m, m.mData); err != nil { + return err + } + m.metricIndex++ + return nil +} + +// resetMetric resets all the fields in m to equal the zero value, but reusing slice memory. +func (m *MetricStreamingDecoder) resetMetric() { + m.labels = m.labels[:0] + m.TimestampMs = 0 + + // TODO(bwplotka): Autogenerate reset functions.
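// Note: the resets below deliberately truncate slices with [:0] and zero
// fields in place instead of replacing whole sub-messages, so backing arrays
// stay allocated and are reused from one metric to the next.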
+ if m.Counter != nil { + m.Counter.Value = 0 + m.Counter.CreatedTimestamp = nil + m.Counter.Exemplar = nil + } + if m.Gauge != nil { + m.Gauge.Value = 0 + } + if m.Histogram != nil { + m.Histogram.SampleCount = 0 + m.Histogram.SampleCountFloat = 0 + m.Histogram.SampleSum = 0 + m.Histogram.Bucket = m.Histogram.Bucket[:0] + m.Histogram.CreatedTimestamp = nil + m.Histogram.Schema = 0 + m.Histogram.ZeroThreshold = 0 + m.Histogram.ZeroCount = 0 + m.Histogram.ZeroCountFloat = 0 + m.Histogram.NegativeSpan = m.Histogram.NegativeSpan[:0] + m.Histogram.NegativeDelta = m.Histogram.NegativeDelta[:0] + m.Histogram.NegativeCount = m.Histogram.NegativeCount[:0] + m.Histogram.PositiveSpan = m.Histogram.PositiveSpan[:0] + m.Histogram.PositiveDelta = m.Histogram.PositiveDelta[:0] + m.Histogram.PositiveCount = m.Histogram.PositiveCount[:0] + m.Histogram.Exemplars = m.Histogram.Exemplars[:0] + } + if m.Summary != nil { + m.Summary.SampleCount = 0 + m.Summary.SampleSum = 0 + m.Summary.Quantile = m.Summary.Quantile[:0] + m.Summary.CreatedTimestamp = nil + } +} + +func (m *MetricStreamingDecoder) GetMetric() { + panic("don't use GetMetric, use Metric directly") +} + +func (m *MetricStreamingDecoder) GetLabel() { + panic("don't use GetLabel, use Label instead") +} + +type scratchBuilder interface { + Add(name, value string) +} + +// Label parses labels into the labels scratch builder. The metric name is missing +// from the protobuf metric model and has to be deduced from the metric family name. +// TODO: The method name intentionally hides the MetricStreamingDecoder.Metric.Label +// field to avoid direct use (it's not parsed). In the future, the generator will produce +// structs tailored for streaming decoding. +func (m *MetricStreamingDecoder) Label(b scratchBuilder) error { + for _, l := range m.labels { + if err := parseLabel(m.mData[l.start:l.end], b); err != nil { + return err + } + } + return nil +} + +// parseLabel is essentially LabelPair.Unmarshal, but it adds directly into the scratch builder +// and reuses strings.
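Taken together, the decoder is driven with two nested loops: NextMetricFamily advances the outer frame, NextMetric the inner one, and Label materializes labels on demand. A minimal consumption sketch, assuming the errors, io and model/labels imports (consume is an illustrative name, not part of this package):

func consume(data []byte) error {
	d := NewMetricStreamingDecoder(data)
	for {
		if err := d.NextMetricFamily(); errors.Is(err, io.EOF) {
			return nil // end of input
		} else if err != nil {
			return err
		}
		for {
			err := d.NextMetric()
			if errors.Is(err, io.EOF) {
				break // move on to the next family
			}
			if err != nil {
				return err
			}
			b := labels.NewScratchBuilder(0)
			if err := d.Label(&b); err != nil {
				return err
			}
			// d.GetName(), d.GetType() and e.g. d.GetGauge() describe the sample.
			_ = b.Labels()
		}
	}
}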
+func parseLabel(dAtA []byte, b scratchBuilder) error { + var name, value string + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + name = yoloString(dAtA[iNdEx:postIndex]) + if !model.LabelName(name).IsValid() { + return fmt.Errorf("invalid label name: %s", name) + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + value = yoloString(dAtA[iNdEx:postIndex]) + if !utf8.ValidString(value) { + return fmt.Errorf("invalid label value: %s", value) + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { + return io.ErrUnexpectedEOF + } + b.Add(name, value) + return nil +} + +func yoloString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +type pos struct { + start, end int +} + +func (m *Metric) unmarshalWithoutLabels(p *MetricStreamingDecoder, dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) 
+ } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + p.labels = append(p.labels, pos{start: iNdEx, end: postIndex}) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Summary == nil { + m.Summary = &Summary{} + } + if err := m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Untyped == nil { + m.Untyped = &Untyped{} + } + if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + 
m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *MetricFamily) unmarshalWithoutMetrics(buf *MetricStreamingDecoder, dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: MetricFamily: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + buf.metrics = append(buf.metrics, pos{start: iNdEx, end: postIndex}) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/prompb/io/prometheus/client/decoder_test.go b/prompb/io/prometheus/client/decoder_test.go new file mode 100644 index 0000000000..8697b78fca --- /dev/null +++ b/prompb/io/prometheus/client/decoder_test.go @@ -0,0 +1,171 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package io_prometheus_client //nolint:revive + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" +) + +const ( + testGauge = `name: "go_build_info" +help: "Build information about the main Go module." 
+type: GAUGE +metric: < + label: < + name: "checksum" + value: "" + > + label: < + name: "path" + value: "github.com/prometheus/client_golang" + > + label: < + name: "version" + value: "(devel)" + > + gauge: < + value: 1 + > +> +metric: < + label: < + name: "checksum" + value: "" + > + label: < + name: "path" + value: "github.com/prometheus/prometheus" + > + label: < + name: "version" + value: "v3.0.0" + > + gauge: < + value: 2 + > +> + +` + testCounter = `name: "go_memstats_alloc_bytes_total" +help: "Total number of bytes allocated, even if freed." +type: COUNTER +unit: "bytes" +metric: < + counter: < + value: 1.546544e+06 + exemplar: < + label: < + name: "dummyID" + value: "42" + > + value: 12 + timestamp: < + seconds: 1625851151 + nanos: 233181499 + > + > + > +> + +` +) + +func TestMetricStreamingDecoder(t *testing.T) { + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := bytes.Buffer{} + for _, m := range []string{testGauge, testCounter} { + mf := &MetricFamily{} + require.NoError(t, proto.UnmarshalText(m, mf)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(mf) + require.NoError(t, err) + + // Write first length, then binary protobuf. + varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + + d := NewMetricStreamingDecoder(buf.Bytes()) + require.NoError(t, d.NextMetricFamily()) + nextFn := func() error { + for { + err := d.NextMetric() + if errors.Is(err, io.EOF) { + if err := d.NextMetricFamily(); err != nil { + return err + } + continue + } + return err + } + } + + var firstMetricLset labels.Labels + { + require.NoError(t, nextFn()) + + require.Equal(t, "go_build_info", d.GetName()) + require.Equal(t, "Build information about the main Go module.", d.GetHelp()) + require.Equal(t, MetricType_GAUGE, d.GetType()) + + require.Equal(t, float64(1), d.GetGauge().GetValue()) + b := labels.NewScratchBuilder(0) + require.NoError(t, d.Label(&b)) + + firstMetricLset = b.Labels() + + require.Equal(t, `{checksum="", path="github.com/prometheus/client_golang", version="(devel)"}`, firstMetricLset.String()) + } + + { + require.NoError(t, nextFn()) + + require.Equal(t, "go_build_info", d.GetName()) + require.Equal(t, "Build information about the main Go module.", d.GetHelp()) + require.Equal(t, MetricType_GAUGE, d.GetType()) + + require.Equal(t, float64(2), d.GetGauge().GetValue()) + b := labels.NewScratchBuilder(0) + require.NoError(t, d.Label(&b)) + require.Equal(t, `{checksum="", path="github.com/prometheus/prometheus", version="v3.0.0"}`, b.Labels().String()) + } + { + // Different mf now. + require.NoError(t, nextFn()) + + require.Equal(t, "go_memstats_alloc_bytes_total", d.GetName()) + require.Equal(t, "Total number of bytes allocated, even if freed.", d.GetHelp()) + require.Equal(t, "bytes", d.GetUnit()) + require.Equal(t, MetricType_COUNTER, d.GetType()) + + require.Equal(t, 1.546544e+06, d.Metric.GetCounter().GetValue()) + b := labels.NewScratchBuilder(0) + require.NoError(t, d.Label(&b)) + require.Equal(t, `{}`, b.Labels().String()) + } + require.Equal(t, io.EOF, nextFn()) + + // Expect labels and metricBytes to be static and reusable even after parsing. 
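// Note: the yolo-string label values alias the original input buffer, which
// the decoder only ever reads, so earlier label sets stay stable as parsing
// continues.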
+ require.Equal(t, `{checksum="", path="github.com/prometheus/client_golang", version="(devel)"}`, firstMetricLset.String()) +} diff --git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go index 25fa0d4035..4434c525fc 100644 --- a/prompb/io/prometheus/write/v2/codec.go +++ b/prompb/io/prometheus/write/v2/codec.go @@ -196,6 +196,9 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram } func spansToSpansProto(s []histogram.Span) []BucketSpan { + if len(s) == 0 { + return nil + } spans := make([]BucketSpan, len(s)) for i := 0; i < len(s); i++ { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go index d6ea8398f7..1419de217e 100644 --- a/prompb/io/prometheus/write/v2/types.pb.go +++ b/prompb/io/prometheus/write/v2/types.pb.go @@ -6,11 +6,12 @@ package writev2 import ( encoding_binary "encoding/binary" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -302,15 +303,10 @@ type Exemplar struct { // value represents an exact example value. This can be useful when the exemplar // is attached to a histogram, which only gives an estimated value through buckets. Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - // timestamp represents an optional timestamp of the sample in ms. + // timestamp represents the timestamp of the exemplar in ms. // // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go // for conversion from/to time.Time to Prometheus timestamp. - // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto index 0cc7b8bc4a..ff6c4936bb 100644 --- a/prompb/io/prometheus/write/v2/types.proto +++ b/prompb/io/prometheus/write/v2/types.proto @@ -107,15 +107,10 @@ message Exemplar { // value represents an exact example value. This can be useful when the exemplar // is attached to a histogram, which only gives an estimated value through buckets. double value = 2; - // timestamp represents an optional timestamp of the sample in ms. + // timestamp represents the timestamp of the exemplar in ms. // // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go // for conversion from/to time.Time to Prometheus timestamp. - // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. 
int64 timestamp = 3; } diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go index 2ab95e0d19..b91355c51c 100644 --- a/prompb/rwcommon/codec_test.go +++ b/prompb/rwcommon/codec_test.go @@ -135,12 +135,12 @@ func TestToMetadata(t *testing.T) { func TestToHistogram_Empty(t *testing.T) { t.Run("v1", func(t *testing.T) { - require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "") - require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "") + require.NotNil(t, prompb.Histogram{}.ToIntHistogram()) + require.NotNil(t, prompb.Histogram{}.ToFloatHistogram()) }) t.Run("v2", func(t *testing.T) { - require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "") - require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "") + require.NotNil(t, writev2.Histogram{}.ToIntHistogram()) + require.NotNil(t, writev2.Histogram{}.ToFloatHistogram()) }) } diff --git a/prompb/types.pb.go b/prompb/types.pb.go index 93883daa13..2f5dc77350 100644 --- a/prompb/types.pb.go +++ b/prompb/types.pb.go @@ -402,10 +402,13 @@ type Histogram struct { ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` // timestamp is in ms format, see model/timestamp/timestamp.go for // conversion from time.Time to Prometheus timestamp. - Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // custom_values are not part of the specification, DO NOT use in remote write clients. + // Used only for converting from OpenTelemetry to Prometheus internally. + CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } @@ -588,6 +591,13 @@ func (m *Histogram) GetTimestamp() int64 { return 0 } +func (m *Histogram) GetCustomValues() []float64 { + if m != nil { + return m.CustomValues + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
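The CustomValues plumbing above (the struct field, its getter and the codec.go wiring earlier in this diff) lets custom bucket boundaries survive conversion. A hedged sketch, assuming schema -53 is the custom-buckets schema used by native histograms with custom buckets:

h := prompb.Histogram{
	Schema:       -53, // custom-buckets schema (assumption; see model/histogram)
	CustomValues: []float64{0.1, 0.5},
}
fh := h.ToFloatHistogram()
// fh.CustomValues now carries {0.1, 0.5}; before this change the boundaries
// were dropped in the conversion.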
func (*Histogram) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1146,76 +1156,77 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 1092 bytes of a gzipped FileDescriptorProto + // 1114 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, - 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0xd9, 0xa0, 0x71, 0x54, 0x16, 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, - 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, - 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, - 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, - 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, - 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, - 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, - 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, - 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, - 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, - 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, - 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, - 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, - 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, - 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, - 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, - 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, - 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, - 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, - 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, - 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, - 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, - 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, - 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, - 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, - 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, - 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, - 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, - 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, - 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 
0x28, 0x9e, 0xf0, - 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, - 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, - 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, - 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, - 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, - 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, - 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, - 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, - 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, - 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 0x27, 0xa5, - 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, - 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, - 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, - 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, - 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, - 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, - 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, - 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, - 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, - 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, - 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, - 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, - 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, - 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, - 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, - 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, - 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, - 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, - 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, - 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, - 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, - 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, - 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, - 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, - 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, - 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, - 0x13, 
0x09, 0x00, 0x00, + 0xb2, 0x92, 0xdb, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0xc8, 0x65, 0xfb, 0x02, 0x45, 0xe1, 0xab, 0x5e, 0xf6, 0x11, + 0x8a, 0x1d, 0x92, 0x22, 0x15, 0xa7, 0x40, 0xd3, 0xbb, 0x9d, 0x6f, 0xbe, 0x99, 0xf9, 0xb8, 0x3b, + 0x3b, 0x4b, 0xa8, 0xc9, 0x55, 0xc4, 0x45, 0x27, 0x8a, 0x43, 0x19, 0x12, 0x88, 0xe2, 0xd0, 0xe7, + 0x72, 0xce, 0x97, 0xe2, 0xfe, 0xce, 0x2c, 0x9c, 0x85, 0x08, 0xef, 0xa9, 0x55, 0xc2, 0x70, 0x7f, + 0xd6, 0xa1, 0xd9, 0xe3, 0x32, 0xf6, 0x26, 0x3d, 0x2e, 0xd9, 0x94, 0x49, 0x46, 0x9e, 0x40, 0x49, + 0xe5, 0x70, 0xb4, 0x96, 0xd6, 0x6e, 0xee, 0x3f, 0xea, 0xe4, 0x39, 0x3a, 0x9b, 0xcc, 0xd4, 0x1c, + 0xad, 0x22, 0x4e, 0x31, 0x84, 0x7c, 0x0a, 0xc4, 0x47, 0x6c, 0x7c, 0xc5, 0x7c, 0x6f, 0xb1, 0x1a, + 0x07, 0xcc, 0xe7, 0x8e, 0xde, 0xd2, 0xda, 0x16, 0xb5, 0x13, 0xcf, 0x31, 0x3a, 0xfa, 0xcc, 0xe7, + 0x84, 0x40, 0x69, 0xce, 0x17, 0x91, 0x53, 0x42, 0x3f, 0xae, 0x15, 0xb6, 0x0c, 0x3c, 0xe9, 0x94, + 0x13, 0x4c, 0xad, 0xdd, 0x15, 0x40, 0x5e, 0x89, 0xd4, 0xa0, 0x72, 0xd1, 0xff, 0xba, 0x3f, 0xf8, + 0xb6, 0x6f, 0x6f, 0x29, 0xe3, 0x68, 0x70, 0xd1, 0x1f, 0x75, 0xa9, 0xad, 0x11, 0x0b, 0xca, 0x27, + 0x07, 0x17, 0x27, 0x5d, 0x5b, 0x27, 0x0d, 0xb0, 0x4e, 0xcf, 0x86, 0xa3, 0xc1, 0x09, 0x3d, 0xe8, + 0xd9, 0x06, 0x21, 0xd0, 0x44, 0x4f, 0x8e, 0x95, 0x54, 0xe8, 0xf0, 0xa2, 0xd7, 0x3b, 0xa0, 0x2f, + 0xec, 0x32, 0xa9, 0x42, 0xe9, 0xac, 0x7f, 0x3c, 0xb0, 0x4d, 0x52, 0x87, 0xea, 0x70, 0x74, 0x30, + 0xea, 0x0e, 0xbb, 0x23, 0xbb, 0xe2, 0x3e, 0x05, 0x73, 0xc8, 0xfc, 0x68, 0xc1, 0xc9, 0x0e, 0x94, + 0x5f, 0xb1, 0xc5, 0x32, 0xd9, 0x16, 0x8d, 0x26, 0x06, 0x79, 0x1f, 0x2c, 0xe9, 0xf9, 0x5c, 0x48, + 0xe6, 0x47, 0xf8, 0x9d, 0x06, 0xcd, 0x01, 0x37, 0x84, 0x6a, 0xf7, 0x9a, 0xfb, 0xd1, 0x82, 0xc5, + 0x64, 0x0f, 0xcc, 0x05, 0xbb, 0xe4, 0x0b, 0xe1, 0x68, 0x2d, 0xa3, 0x5d, 0xdb, 0xdf, 0x2e, 0xee, + 0xeb, 0xb9, 0xf2, 0x1c, 0x96, 0x5e, 0xff, 0xfe, 0x70, 0x8b, 0xa6, 0xb4, 0xbc, 0xa0, 0xfe, 0x8f, + 0x05, 0x8d, 0x37, 0x0b, 0xfe, 0x55, 0x06, 0xeb, 0xd4, 0x13, 0x32, 0x9c, 0xc5, 0xcc, 0x27, 0x0f, + 0xc0, 0x9a, 0x84, 0xcb, 0x40, 0x8e, 0xbd, 0x40, 0xa2, 0xec, 0xd2, 0xe9, 0x16, 0xad, 0x22, 0x74, + 0x16, 0x48, 0xf2, 0x01, 0xd4, 0x12, 0xf7, 0xd5, 0x22, 0x64, 0x32, 0x29, 0x73, 0xba, 0x45, 0x01, + 0xc1, 0x63, 0x85, 0x11, 0x1b, 0x0c, 0xb1, 0xf4, 0xb1, 0x8e, 0x46, 0xd5, 0x92, 0xdc, 0x03, 0x53, + 0x4c, 0xe6, 0xdc, 0x67, 0x78, 0x6a, 0xdb, 0x34, 0xb5, 0xc8, 0x23, 0x68, 0xfe, 0xc8, 0xe3, 0x70, + 0x2c, 0xe7, 0x31, 0x17, 0xf3, 0x70, 0x31, 0xc5, 0x13, 0xd4, 0x68, 0x43, 0xa1, 0xa3, 0x0c, 0x24, + 0x1f, 0xa5, 0xb4, 0x5c, 0x97, 0x89, 0xba, 0x34, 0x5a, 0x57, 0xf8, 0x51, 0xa6, 0xed, 0x13, 0xb0, + 0x0b, 0xbc, 0x44, 0x60, 0x05, 0x05, 0x6a, 0xb4, 0xb9, 0x66, 0x26, 0x22, 0x8f, 0xa0, 0x19, 0xf0, + 0x19, 0x93, 0xde, 0x2b, 0x3e, 0x16, 0x11, 0x0b, 0x84, 0x53, 0xc5, 0x1d, 0xbe, 0x57, 0xdc, 0xe1, + 0xc3, 0xe5, 0xe4, 0x25, 0x97, 0xc3, 0x88, 0x05, 0xe9, 0x36, 0x37, 0xb2, 0x18, 0x85, 0x09, 0xf2, + 0x31, 0xdc, 0x59, 0x27, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0x75, 0xee, + 0x67, 0x88, 0x6e, 0x10, 0x51, 0x9d, 0x70, 0xa0, 0x65, 0xb4, 0xb5, 0x9c, 0x88, 0xd2, 0x84, 0x92, + 0x15, 0x85, 0xc2, 0x2b, 0xc8, 0xaa, 0xfd, 0x1b, 0x59, 0x59, 0xcc, 0x5a, 0xd6, 0x3a, 0x49, 0x2a, + 0xab, 0x9e, 0xc8, 0xca, 0xe0, 0x5c, 0xd6, 0x9a, 0x98, 0xca, 0x6a, 0x24, 0xb2, 0x32, 0x38, 0x95, + 0xf5, 0x15, 0x40, 0xcc, 0x05, 0x97, 0xe3, 0xb9, 0xda, 0xfd, 0x26, 0xde, 0xf1, 0x87, 0x45, 0x49, + 0xeb, 0xfe, 0xe9, 0x50, 0xc5, 0x3b, 0xf5, 0x02, 0x49, 0xad, 0x38, 0x5b, 0x6e, 0x36, 0xe0, 0x9d, + 0x37, 
0x1a, 0x90, 0x7c, 0x08, 0x8d, 0xc9, 0x52, 0xc8, 0xd0, 0x1f, 0x63, 0xbb, 0x0a, 0xc7, 0x46, + 0x11, 0xf5, 0x04, 0xfc, 0x06, 0x31, 0xf7, 0x73, 0xb0, 0xd6, 0xa9, 0x37, 0xaf, 0x73, 0x05, 0x8c, + 0x17, 0xdd, 0xa1, 0xad, 0x11, 0x13, 0xf4, 0xfe, 0xc0, 0xd6, 0xf3, 0x2b, 0x6d, 0x1c, 0x56, 0xa0, + 0x8c, 0x1f, 0x76, 0x58, 0x07, 0xc8, 0x7b, 0xc3, 0x7d, 0x0a, 0x90, 0x6f, 0xa2, 0x6a, 0xcf, 0xf0, + 0xea, 0x4a, 0xf0, 0xa4, 0xdf, 0xb7, 0x69, 0x6a, 0x29, 0x7c, 0xc1, 0x83, 0x99, 0x9c, 0x63, 0x9b, + 0x37, 0x68, 0x6a, 0xb9, 0x7f, 0x6a, 0x00, 0x23, 0xcf, 0xe7, 0x43, 0x1e, 0x7b, 0x5c, 0xbc, 0xfb, + 0x25, 0xdd, 0x87, 0x8a, 0xc0, 0xf9, 0x20, 0x1c, 0x1d, 0x23, 0x48, 0x31, 0x22, 0x19, 0x1d, 0x69, + 0x48, 0x46, 0x24, 0x5f, 0x80, 0xc5, 0xd3, 0xa9, 0x20, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, + 0xc8, 0x48, 0xe3, 0x72, 0x32, 0xf9, 0x12, 0x60, 0x9e, 0x9d, 0x8e, 0x70, 0x4a, 0x18, 0x7a, 0xf7, + 0xad, 0x67, 0x97, 0xc6, 0x16, 0xe8, 0xee, 0x63, 0x28, 0xe3, 0x17, 0xa8, 0x11, 0x8b, 0x63, 0x59, + 0x4b, 0x46, 0xac, 0x5a, 0x6f, 0x0e, 0x1b, 0x2b, 0x1d, 0x36, 0xee, 0x13, 0x30, 0xcf, 0x93, 0xef, + 0x7c, 0xd7, 0x8d, 0x71, 0x7f, 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xf1, + 0xc6, 0xab, 0xf2, 0xe0, 0x56, 0x7c, 0xca, 0xeb, 0x14, 0x5e, 0x93, 0x4c, 0xa8, 0xfe, 0x36, 0xa1, + 0x46, 0x51, 0x68, 0x1b, 0x4a, 0xf8, 0x36, 0x98, 0xa0, 0x77, 0x9f, 0x27, 0x7d, 0xd4, 0xef, 0x3e, + 0x4f, 0xfa, 0x88, 0xaa, 0xf7, 0x40, 0x01, 0xb4, 0x6b, 0x1b, 0xee, 0x2f, 0x9a, 0x6a, 0x3e, 0x36, + 0x55, 0xbd, 0x27, 0xc8, 0xff, 0xa1, 0x22, 0x24, 0x8f, 0xc6, 0xbe, 0x40, 0x5d, 0x06, 0x35, 0x95, + 0xd9, 0x13, 0xaa, 0xf4, 0xd5, 0x32, 0x98, 0x64, 0xa5, 0xd5, 0x9a, 0xbc, 0x07, 0x55, 0x21, 0x59, + 0x2c, 0x15, 0x3b, 0x99, 0xbc, 0x15, 0xb4, 0x7b, 0x82, 0xdc, 0x05, 0x93, 0x07, 0xd3, 0x31, 0x1e, + 0x8a, 0x72, 0x94, 0x79, 0x30, 0xed, 0x09, 0x72, 0x1f, 0xaa, 0xb3, 0x38, 0x5c, 0x46, 0x5e, 0x30, + 0x73, 0xca, 0x2d, 0xa3, 0x6d, 0xd1, 0xb5, 0x4d, 0x9a, 0xa0, 0x5f, 0xae, 0x70, 0xfa, 0x55, 0xa9, + 0x7e, 0xb9, 0x52, 0xd9, 0x63, 0x16, 0xcc, 0xb8, 0x4a, 0x52, 0x49, 0xb2, 0xa3, 0xdd, 0x13, 0xee, + 0x6f, 0x1a, 0x94, 0x8f, 0xe6, 0xcb, 0xe0, 0x25, 0xd9, 0x85, 0x9a, 0xef, 0x05, 0x63, 0x75, 0xdf, + 0x72, 0xcd, 0x96, 0xef, 0x05, 0xaa, 0x87, 0x7b, 0x02, 0xfd, 0xec, 0x7a, 0xed, 0x4f, 0x1f, 0x24, + 0x9f, 0x5d, 0xa7, 0xfe, 0x4e, 0x7a, 0x08, 0x06, 0x1e, 0xc2, 0xfd, 0xe2, 0x21, 0x60, 0x81, 0x4e, + 0x37, 0x98, 0x84, 0x53, 0x2f, 0x98, 0xe5, 0x27, 0xa0, 0x1e, 0x7a, 0xfc, 0xaa, 0x3a, 0xc5, 0xb5, + 0xfb, 0x0c, 0xaa, 0x19, 0xeb, 0xd6, 0xe5, 0xfd, 0x6e, 0xa0, 0xde, 0xe1, 0x8d, 0xc7, 0x57, 0x27, + 0xff, 0x83, 0x3b, 0xc7, 0xe7, 0x83, 0x83, 0xd1, 0xb8, 0xf0, 0x22, 0xbb, 0x3f, 0x40, 0x03, 0x2b, + 0xf2, 0xe9, 0x7f, 0xbd, 0x7a, 0x7b, 0x60, 0x4e, 0x54, 0x86, 0xec, 0xe6, 0x6d, 0xdf, 0xfa, 0x9a, + 0x2c, 0x20, 0xa1, 0x1d, 0xee, 0xbc, 0xbe, 0xd9, 0xd5, 0x7e, 0xbd, 0xd9, 0xd5, 0xfe, 0xb8, 0xd9, + 0xd5, 0xbe, 0x37, 0x15, 0x3b, 0xba, 0xbc, 0x34, 0xf1, 0x3f, 0xe8, 0xb3, 0xbf, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x8b, 0x63, 0xd6, 0x2e, 0x38, 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -1385,6 +1396,18 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } 
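Field 16 is the first Histogram field whose protobuf key no longer fits in a single byte: key = (16<<3)|2 = 130 = 0x82, whose varint encoding is the two bytes 0x82 0x01, emitted above in reverse order because MarshalToSizedBuffer fills the buffer back to front. A small illustrative helper (tagBytes is hypothetical, not generated code):

func tagBytes(fieldNum, wireType uint64) []byte {
	key := fieldNum<<3 | wireType
	var out []byte
	for key >= 0x80 {
		out = append(out, byte(key)|0x80) // low 7 bits, continuation bit set
		key >>= 7
	}
	return append(out, byte(key)) // tagBytes(16, 2) == []byte{0x82, 0x01}
}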
if m.Timestamp != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) i-- @@ -1397,30 +1420,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.PositiveCounts) > 0 { for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { - f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + f2 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) } i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) i-- dAtA[i] = 0x6a } if len(m.PositiveDeltas) > 0 { - var j2 int - dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + var j3 int + dAtA5 := make([]byte, len(m.PositiveDeltas)*10) for _, num := range m.PositiveDeltas { - x3 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x3 >= 1<<7 { - dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) - j2++ - x3 >>= 7 + x4 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x4 >= 1<<7 { + dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) + j3++ + x4 >>= 7 } - dAtA4[j2] = uint8(x3) - j2++ + dAtA5[j3] = uint8(x4) + j3++ } - i -= j2 - copy(dAtA[i:], dAtA4[:j2]) - i = encodeVarintTypes(dAtA, i, uint64(j2)) + i -= j3 + copy(dAtA[i:], dAtA5[:j3]) + i = encodeVarintTypes(dAtA, i, uint64(j3)) i-- dAtA[i] = 0x62 } @@ -1440,30 +1463,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.NegativeCounts) > 0 { for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { - f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + f6 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) } i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) i-- dAtA[i] = 0x52 } if len(m.NegativeDeltas) > 0 { - var j6 int - dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + var j7 int + dAtA9 := make([]byte, len(m.NegativeDeltas)*10) for _, num := range m.NegativeDeltas { - x7 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x7 >= 1<<7 { - dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) - j6++ - x7 >>= 7 + x8 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x8 >= 1<<7 { + dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) + j7++ + x8 >>= 7 } - dAtA8[j6] = uint8(x7) - j6++ + dAtA9[j7] = uint8(x8) + j7++ } - i -= j6 - copy(dAtA[i:], dAtA8[:j6]) - i = encodeVarintTypes(dAtA, i, uint64(j6)) + i -= j7 + copy(dAtA[i:], dAtA9[:j7]) + i = encodeVarintTypes(dAtA, i, uint64(j7)) i-- dAtA[i] = 0x4a } @@ -2133,6 +2156,9 @@ func (m *Histogram) Size() (n int) { if m.Timestamp != 0 { n += 1 + sovTypes(uint64(m.Timestamp)) } + if len(m.CustomValues) > 0 { + n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3248,6 +3274,60 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { break } } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := 
iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/prompb/types.proto b/prompb/types.proto index 61fc1e0143..8bc69d5b10 100644 --- a/prompb/types.proto +++ b/prompb/types.proto @@ -107,6 +107,10 @@ message Histogram { // timestamp is in ms format, see model/timestamp/timestamp.go for // conversion from time.Time to Prometheus timestamp. int64 timestamp = 15; + + // custom_values are not part of the specification, DO NOT use in remote write clients. + // Used only for converting from OpenTelemetry to Prometheus internally. + repeated double custom_values = 16; } // A BucketSpan defines a number of consecutive buckets with their diff --git a/promql/bench_test.go b/promql/bench_test.go index bd67280294..695b2c1eb4 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -21,10 +21,13 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" @@ -86,8 +89,8 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in } } - stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series. - stor.DB.Compact(ctx) + stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series. + stor.Compact(ctx) return nil } @@ -116,7 +119,7 @@ func rangeQueryCases() []benchCase { }, // Holt-Winters and long ranges. { - expr: "holt_winters(a_X[1d], 0.3, 0.3)", + expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)", }, { expr: "changes(a_X[1d])", @@ -165,6 +168,9 @@ func rangeQueryCases() []benchCase { { expr: "sum(a_X)", }, + { + expr: "avg(a_X)", + }, { expr: "sum without (l)(h_X)", }, @@ -263,7 +269,7 @@ func rangeQueryCases() []benchCase { func BenchmarkRangeQuery(b *testing.B) { stor := teststorage.New(b) - stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings. + stor.DisableCompactions() // Don't want auto-compaction disrupting timings. defer stor.Close() opts := promql.EngineOpts{ Logger: nil, @@ -271,7 +277,7 @@ func BenchmarkRangeQuery(b *testing.B) { MaxSamples: 50000000, Timeout: 100 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(b, opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps. 
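The benchmarks above switch from promql.NewEngine to promqltest.NewTestEngineWithOpts. The helper's body is not part of this diff, but given the Engine.Close method added in the engine.go hunks further down, the point is presumably to tie engine shutdown to the test lifecycle. A sketch of what such a helper can look like, using only the exported promql API (the real promqltest implementation may differ):

```go
package promqltest

import (
	"testing"

	"github.com/prometheus/prometheus/promql"
)

// newTestEngineWithOpts builds an engine and guarantees its Close method
// runs when the test or benchmark finishes, releasing the active-query
// tracker. Hypothetical stand-in for the helper used above.
func newTestEngineWithOpts(tb testing.TB, opts promql.EngineOpts) *promql.Engine {
	tb.Helper()
	ng := promql.NewEngine(opts)
	tb.Cleanup(func() {
		if err := ng.Close(); err != nil {
			tb.Errorf("closing test engine: %v", err)
		}
	})
	return ng
}
```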
@@ -362,7 +368,7 @@ func BenchmarkNativeHistograms(b *testing.B) { for _, tc := range cases { b.Run(tc.name, func(b *testing.B) { - ng := promql.NewEngine(opts) + ng := promqltest.NewTestEngineWithOpts(b, opts) for i := 0; i < b.N; i++ { qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) if err != nil { @@ -376,6 +382,196 @@ func BenchmarkNativeHistograms(b *testing.B) { } } +func BenchmarkNativeHistogramsCustomBuckets(b *testing.B) { + testStorage := teststorage.New(b) + defer testStorage.Close() + + app := testStorage.Appender(context.TODO()) + if err := generateNativeHistogramCustomBucketsSeries(app, 3000); err != nil { + b.Fatal(err) + } + if err := app.Commit(); err != nil { + b.Fatal(err) + } + + start := time.Unix(0, 0) + end := start.Add(2 * time.Hour) + step := time.Second * 30 + + cases := []struct { + name string + query string + }{ + { + name: "sum", + query: "sum(native_histogram_custom_bucket_series)", + }, + { + name: "sum rate with short rate interval", + query: "sum(rate(native_histogram_custom_bucket_series[2m]))", + }, + { + name: "sum rate with long rate interval", + query: "sum(rate(native_histogram_custom_bucket_series[20m]))", + }, + { + name: "histogram_count with short rate interval", + query: "histogram_count(sum(rate(native_histogram_custom_bucket_series[2m])))", + }, + { + name: "histogram_count with long rate interval", + query: "histogram_count(sum(rate(native_histogram_custom_bucket_series[20m])))", + }, + } + + opts := promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 50000000, + Timeout: 100 * time.Second, + EnableAtModifier: true, + EnableNegativeOffset: true, + } + + b.ResetTimer() + b.ReportAllocs() + + for _, tc := range cases { + b.Run(tc.name, func(b *testing.B) { + ng := promqltest.NewTestEngineWithOpts(b, opts) + for i := 0; i < b.N; i++ { + qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) + if err != nil { + b.Fatal(err) + } + if result := qry.Exec(context.Background()); result.Err != nil { + b.Fatal(result.Err) + } + } + }) + } +} + +func BenchmarkInfoFunction(b *testing.B) { + // Initialize test storage and generate test series data. + testStorage := teststorage.New(b) + defer testStorage.Close() + + start := time.Unix(0, 0) + end := start.Add(2 * time.Hour) + step := 30 * time.Second + + // Generate time series data for the benchmark. + generateInfoFunctionTestSeries(b, testStorage, 100, 2000, 3600) + + // Define test cases with queries to benchmark. + cases := []struct { + name string + query string + }{ + { + name: "Joining info metrics with other metrics with group_left example 1", + query: "rate(http_server_request_duration_seconds_count[2m]) * on (job, instance) group_left (k8s_cluster_name) target_info{k8s_cluster_name=\"us-east\"}", + }, + { + name: "Joining info metrics with other metrics with info() example 1", + query: `info(rate(http_server_request_duration_seconds_count[2m]), {k8s_cluster_name="us-east"})`, + }, + { + name: "Joining info metrics with other metrics with group_left example 2", + query: "sum by (k8s_cluster_name, http_status_code) (rate(http_server_request_duration_seconds_count[2m]) * on (job, instance) group_left (k8s_cluster_name) target_info)", + }, + { + name: "Joining info metrics with other metrics with info() example 2", + query: `sum by (k8s_cluster_name, http_status_code) (info(rate(http_server_request_duration_seconds_count[2m]), {k8s_cluster_name=~".+"}))`, + }, + } + + // Benchmark each query type. 
+	for _, tc := range cases {
+		// Initialize the PromQL engine for each benchmark case.
+		opts := promql.EngineOpts{
+			Logger:               nil,
+			Reg:                  nil,
+			MaxSamples:           50000000,
+			Timeout:              100 * time.Second,
+			EnableAtModifier:     true,
+			EnableNegativeOffset: true,
+		}
+		engine := promql.NewEngine(opts)
+		b.Run(tc.name, func(b *testing.B) {
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				b.StopTimer() // Stop the timer to exclude setup time.
+				qry, err := engine.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
+				require.NoError(b, err)
+				b.StartTimer()
+				result := qry.Exec(context.Background())
+				require.NoError(b, result.Err)
+			}
+		})
+	}
+
+	// Report allocations.
+	b.ReportAllocs()
+}
+
+// Helper function to generate target_info and http_server_request_duration_seconds_count series for info function benchmarking.
+func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage, infoSeriesNum, interval, numIntervals int) {
+	tb.Helper()
+
+	ctx := context.Background()
+	statusCodes := []string{"200", "400", "500"}
+
+	// Generate target_info metrics with instance and job labels, and k8s_cluster_name label.
+	// Generate http_server_request_duration_seconds_count metrics with instance and job labels, and http_status_code label.
+	// The classic target_info metric is of gauge type.
+	metrics := make([]labels.Labels, 0, infoSeriesNum+len(statusCodes))
+	for i := 0; i < infoSeriesNum; i++ {
+		clusterName := "us-east"
+		if i >= infoSeriesNum/2 {
+			clusterName = "eu-south"
+		}
+		metrics = append(metrics, labels.FromStrings(
+			"__name__", "target_info",
+			"instance", "instance"+strconv.Itoa(i),
+			"job", "job"+strconv.Itoa(i),
+			"k8s_cluster_name", clusterName,
+		))
+	}
+
+	for _, statusCode := range statusCodes {
+		metrics = append(metrics, labels.FromStrings(
+			"__name__", "http_server_request_duration_seconds_count",
+			"instance", "instance0",
+			"job", "job0",
+			"http_status_code", statusCode,
+		))
+	}
+
+	// Append the generated metrics and samples to the storage.
+	refs := make([]storage.SeriesRef, len(metrics))
+
+	for i := 0; i < numIntervals; i++ {
+		a := stor.Appender(context.Background())
+		ts := int64(i * interval)
+		for j, metric := range metrics[:infoSeriesNum] {
+			ref, _ := a.Append(refs[j], metric, ts, 1)
+			refs[j] = ref
+		}
+
+		for j, metric := range metrics[infoSeriesNum:] {
+			ref, _ := a.Append(refs[j+infoSeriesNum], metric, ts, float64(i))
+			refs[j+infoSeriesNum] = ref
+		}
+
+		require.NoError(tb, a.Commit())
+	}
+
+	stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
+	stor.Compact(ctx)
+}
+
 func generateNativeHistogramSeries(app storage.Appender, numSeries int) error {
 	commonLabels := []string{labels.MetricName, "native_histogram_series", "foo", "bar"}
 	series := make([][]*histogram.Histogram, numSeries)
@@ -411,6 +607,26 @@ func generateNativeHistogramSeries(app storage.Appender, numSeries int) error {
 	return nil
 }
 
+func generateNativeHistogramCustomBucketsSeries(app storage.Appender, numSeries int) error {
+	commonLabels := []string{labels.MetricName, "native_histogram_custom_bucket_series", "foo", "bar"}
+	series := make([][]*histogram.Histogram, numSeries)
+	for i := range series {
+		series[i] = tsdbutil.GenerateTestCustomBucketsHistograms(2000)
+	}
+
+	for sid, histograms := range series {
+		seriesLabels := labels.FromStrings(append(commonLabels, "h", strconv.Itoa(sid))...)
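Aside: tsdbutil.GenerateTestCustomBucketsHistograms (called a few lines up) supplies the histograms appended here. For reference, one custom-buckets value has roughly this shape — a sketch assuming the model/histogram fields at this commit (Schema, CustomValues, delta-encoded buckets); the generator's exact output will differ:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		// CustomBucketsSchema marks the boundaries as coming from
		// CustomValues instead of an exponential schema.
		Schema:        histogram.CustomBucketsSchema,
		Count:         6,
		Sum:           9.0,
		PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
		// Bucket counts, delta-encoded: 1, 1+2=3, 3-1=2.
		PositiveBuckets: []int64{1, 2, -1},
		// Upper bounds of all but the last bucket; the final bucket
		// implicitly ends at +Inf, as with classic le buckets.
		CustomValues: []float64{0.1, 0.5},
	}
	fmt.Println(h.String())
}
```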
+ for i := range histograms { + ts := time.Unix(int64(i*15), 0).UnixMilli() + if _, err := app.AppendHistogram(0, seriesLabels, ts, histograms[i], nil); err != nil { + return err + } + } + } + + return nil +} + func BenchmarkParser(b *testing.B) { cases := []string{ "a", diff --git a/promql/durations.go b/promql/durations.go new file mode 100644 index 0000000000..20fa095d53 --- /dev/null +++ b/promql/durations.go @@ -0,0 +1,150 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "fmt" + "math" + "time" + + "github.com/prometheus/prometheus/promql/parser" +) + +// durationVisitor is a visitor that visits a duration expression and calculates the duration. +type durationVisitor struct { + step time.Duration +} + +func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visitor, error) { + switch n := node.(type) { + case *parser.VectorSelector: + if n.OriginalOffsetExpr != nil { + duration, err := v.calculateDuration(n.OriginalOffsetExpr, true) + if err != nil { + return nil, err + } + n.OriginalOffset = duration + } + case *parser.MatrixSelector: + if n.RangeExpr != nil { + duration, err := v.calculateDuration(n.RangeExpr, false) + if err != nil { + return nil, err + } + n.Range = duration + } + case *parser.SubqueryExpr: + if n.OriginalOffsetExpr != nil { + duration, err := v.calculateDuration(n.OriginalOffsetExpr, true) + if err != nil { + return nil, err + } + n.OriginalOffset = duration + } + if n.StepExpr != nil { + duration, err := v.calculateDuration(n.StepExpr, false) + if err != nil { + return nil, err + } + n.Step = duration + } + if n.RangeExpr != nil { + duration, err := v.calculateDuration(n.RangeExpr, false) + if err != nil { + return nil, err + } + n.Range = duration + } + } + return v, nil +} + +// calculateDuration computes the duration from a duration expression. +func (v *durationVisitor) calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) { + duration, err := v.evaluateDurationExpr(expr) + if err != nil { + return 0, err + } + if duration <= 0 && !allowedNegative { + return 0, fmt.Errorf("%d:%d: duration must be greater than 0", expr.PositionRange().Start, expr.PositionRange().End) + } + if duration > 1<<63-1 || duration < -1<<63 { + return 0, fmt.Errorf("%d:%d: duration is out of range", expr.PositionRange().Start, expr.PositionRange().End) + } + return time.Duration(duration*1000) * time.Millisecond, nil +} + +// evaluateDurationExpr recursively evaluates a duration expression to a float64 value. 
+func (v *durationVisitor) evaluateDurationExpr(expr parser.Expr) (float64, error) { + switch n := expr.(type) { + case *parser.NumberLiteral: + return n.Val, nil + case *parser.DurationExpr: + var lhs, rhs float64 + var err error + + if n.LHS != nil { + lhs, err = v.evaluateDurationExpr(n.LHS) + if err != nil { + return 0, err + } + } + + if n.RHS != nil { + rhs, err = v.evaluateDurationExpr(n.RHS) + if err != nil { + return 0, err + } + } + + switch n.Op { + case parser.STEP: + return float64(v.step.Seconds()), nil + case parser.MIN: + return math.Min(lhs, rhs), nil + case parser.MAX: + return math.Max(lhs, rhs), nil + case parser.ADD: + if n.LHS == nil { + // Unary positive duration expression. + return rhs, nil + } + return lhs + rhs, nil + case parser.SUB: + if n.LHS == nil { + // Unary negative duration expression. + return -rhs, nil + } + return lhs - rhs, nil + case parser.MUL: + return lhs * rhs, nil + case parser.DIV: + if rhs == 0 { + return 0, fmt.Errorf("%d:%d: division by zero", expr.PositionRange().Start, expr.PositionRange().End) + } + return lhs / rhs, nil + case parser.MOD: + if rhs == 0 { + return 0, fmt.Errorf("%d:%d: modulo by zero", expr.PositionRange().Start, expr.PositionRange().End) + } + return math.Mod(lhs, rhs), nil + case parser.POW: + return math.Pow(lhs, rhs), nil + default: + return 0, fmt.Errorf("unexpected duration expression operator %q", n.Op) + } + default: + return 0, fmt.Errorf("unexpected duration expression type %T", n) + } +} diff --git a/promql/durations_test.go b/promql/durations_test.go new file mode 100644 index 0000000000..18592a0d0a --- /dev/null +++ b/promql/durations_test.go @@ -0,0 +1,257 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/promql/parser" +) + +func TestDurationVisitor(t *testing.T) { + // Enable experimental duration expression parsing. + parser.ExperimentalDurationExpr = true + t.Cleanup(func() { + parser.ExperimentalDurationExpr = false + }) + complexExpr := `sum_over_time( + rate(metric[5m] offset 1h)[10m:30s] offset 2h + ) + + avg_over_time( + metric[1h + 30m] offset -1h + ) * + count_over_time( + metric[2h * 0.5] + )` + + expr, err := parser.ParseExpr(complexExpr) + require.NoError(t, err) + + err = parser.Walk(&durationVisitor{}, expr, nil) + require.NoError(t, err) + + // Verify different parts of the expression have correct durations. + // This is a binary expression at the top level. + binExpr, ok := expr.(*parser.BinaryExpr) + require.True(t, ok, "Expected binary expression at top level") + + // Left side should be sum_over_time with subquery. + leftCall, ok := binExpr.LHS.(*parser.Call) + require.True(t, ok, "Expected call expression on left side") + require.Equal(t, "sum_over_time", leftCall.Func.Name) + + // Extract the subquery from sum_over_time. 
+ sumSubquery, ok := leftCall.Args[0].(*parser.SubqueryExpr) + require.True(t, ok, "Expected subquery in sum_over_time") + require.Equal(t, 10*time.Minute, sumSubquery.Range) + require.Equal(t, 30*time.Second, sumSubquery.Step) + require.Equal(t, 2*time.Hour, sumSubquery.OriginalOffset) + + // Extract the rate call inside the subquery. + rateCall, ok := sumSubquery.Expr.(*parser.Call) + require.True(t, ok, "Expected rate call in subquery") + require.Equal(t, "rate", rateCall.Func.Name) + + // Extract the matrix selector from rate. + rateMatrix, ok := rateCall.Args[0].(*parser.MatrixSelector) + require.True(t, ok, "Expected matrix selector in rate") + require.Equal(t, 5*time.Minute, rateMatrix.Range) + require.Equal(t, 1*time.Hour, rateMatrix.VectorSelector.(*parser.VectorSelector).OriginalOffset) + + // Right side should be another binary expression (multiplication). + rightBinExpr, ok := binExpr.RHS.(*parser.BinaryExpr) + require.True(t, ok, "Expected binary expression on right side") + + // Left side of multiplication should be avg_over_time. + avgCall, ok := rightBinExpr.LHS.(*parser.Call) + require.True(t, ok, "Expected call expression on left side of multiplication") + require.Equal(t, "avg_over_time", avgCall.Func.Name) + + // Extract the matrix selector from avg_over_time. + avgMatrix, ok := avgCall.Args[0].(*parser.MatrixSelector) + require.True(t, ok, "Expected matrix selector in avg_over_time") + require.Equal(t, 90*time.Minute, avgMatrix.Range) // 1h + 30m + require.Equal(t, -1*time.Hour, avgMatrix.VectorSelector.(*parser.VectorSelector).OriginalOffset) + + // Right side of multiplication should be count_over_time. + countCall, ok := rightBinExpr.RHS.(*parser.Call) + require.True(t, ok, "Expected call expression on right side of multiplication") + require.Equal(t, "count_over_time", countCall.Func.Name) + + // Extract the matrix selector from count_over_time. 
+ countMatrix, ok := countCall.Args[0].(*parser.MatrixSelector) + require.True(t, ok, "Expected matrix selector in count_over_time") + require.Equal(t, 1*time.Hour, countMatrix.Range) // 2h * 0.5 +} + +func TestCalculateDuration(t *testing.T) { + tests := []struct { + name string + expr parser.Expr + expected time.Duration + errorMessage string + allowedNegative bool + }{ + { + name: "addition", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 5}, + RHS: &parser.NumberLiteral{Val: 10}, + Op: parser.ADD, + }, + expected: 15 * time.Second, + }, + { + name: "subtraction", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 15}, + RHS: &parser.NumberLiteral{Val: 5}, + Op: parser.SUB, + }, + expected: 10 * time.Second, + }, + { + name: "subtraction with negative", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 5}, + RHS: &parser.NumberLiteral{Val: 10}, + Op: parser.SUB, + }, + errorMessage: "duration must be greater than 0", + }, + { + name: "multiplication", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 5}, + RHS: &parser.NumberLiteral{Val: 3}, + Op: parser.MUL, + }, + expected: 15 * time.Second, + }, + { + name: "division", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 15}, + RHS: &parser.NumberLiteral{Val: 3}, + Op: parser.DIV, + }, + expected: 5 * time.Second, + }, + { + name: "modulo with numbers", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 17}, + RHS: &parser.NumberLiteral{Val: 5}, + Op: parser.MOD, + }, + expected: 2 * time.Second, + }, + { + name: "power", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 2}, + RHS: &parser.NumberLiteral{Val: 3}, + Op: parser.POW, + }, + expected: 8 * time.Second, + }, + { + name: "complex expression", + expr: &parser.DurationExpr{ + LHS: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 2}, + RHS: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 3}, + RHS: &parser.NumberLiteral{Val: 4}, + Op: parser.ADD, + }, + Op: parser.MUL, + }, + RHS: &parser.NumberLiteral{Val: 1}, + Op: parser.SUB, + }, + expected: 13 * time.Second, + }, + { + name: "unary negative", + expr: &parser.DurationExpr{ + RHS: &parser.NumberLiteral{Val: 5}, + Op: parser.SUB, + }, + expected: -5 * time.Second, + allowedNegative: true, + }, + { + name: "step", + expr: &parser.DurationExpr{ + Op: parser.STEP, + }, + expected: 1 * time.Second, + }, + { + name: "step multiplication", + expr: &parser.DurationExpr{ + LHS: &parser.DurationExpr{ + Op: parser.STEP, + }, + RHS: &parser.NumberLiteral{Val: 3}, + Op: parser.MUL, + }, + expected: 3 * time.Second, + }, + { + name: "division by zero", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 5}, + RHS: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 5}, + RHS: &parser.NumberLiteral{Val: 5}, + Op: parser.SUB, + }, + Op: parser.DIV, + }, + errorMessage: "division by zero", + }, + { + name: "modulo by zero", + expr: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 5}, + RHS: &parser.DurationExpr{ + LHS: &parser.NumberLiteral{Val: 5}, + RHS: &parser.NumberLiteral{Val: 5}, + Op: parser.SUB, + }, + Op: parser.MOD, + }, + errorMessage: "modulo by zero", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &durationVisitor{step: 1 * time.Second} + result, err := v.calculateDuration(tt.expr, tt.allowedNegative) + if tt.errorMessage != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.errorMessage) + return + } + require.NoError(t, err) + 
require.Equal(t, tt.expected, result) + }) + } +} diff --git a/promql/engine.go b/promql/engine.go index 25e67db633..275fc34a40 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -19,6 +19,8 @@ import ( "context" "errors" "fmt" + "io" + "log/slog" "math" "reflect" "runtime" @@ -29,10 +31,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -42,9 +43,12 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/schema" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/zeropool" ) @@ -121,11 +125,13 @@ type QueryEngine interface { NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) } +var _ QueryLogger = (*logging.JSONFileLogger)(nil) + // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { - Log(...interface{}) error - Close() error + slog.Handler + io.Closer } // A Query is derived from an a raw query string and can be run against an engine @@ -271,6 +277,8 @@ func contextErr(err error, env string) error { // // 2) Enforcement of the maximum number of concurrent queries. type QueryTracker interface { + io.Closer + // GetMaxConcurrent returns maximum number of concurrent queries that are allowed by this tracker. GetMaxConcurrent() int @@ -285,7 +293,7 @@ type QueryTracker interface { // EngineOpts contains configuration options used when creating a new Engine. type EngineOpts struct { - Logger log.Logger + Logger *slog.Logger Reg prometheus.Registerer MaxSamples int Timeout time.Duration @@ -313,12 +321,19 @@ type EngineOpts struct { // EnablePerStepStats if true allows for per-step stats to be computed on request. Disabled otherwise. EnablePerStepStats bool + + // EnableDelayedNameRemoval delays the removal of the __name__ label to the last step of the query evaluation. + // This is useful in certain scenarios where the __name__ label must be preserved or where applying a + // regex-matcher to the __name__ label may otherwise lead to duplicate labelset errors. + EnableDelayedNameRemoval bool + // EnableTypeAndUnitLabels will allow PromQL Engine to make decisions based on the type and unit labels. + EnableTypeAndUnitLabels bool } // Engine handles the lifetime of queries from beginning to end. // It is connected to a querier. type Engine struct { - logger log.Logger + logger *slog.Logger metrics *engineMetrics timeout time.Duration maxSamplesPerQuery int @@ -330,12 +345,14 @@ type Engine struct { enableAtModifier bool enableNegativeOffset bool enablePerStepStats bool + enableDelayedNameRemoval bool + enableTypeAndUnitLabels bool } // NewEngine returns a new engine. 
 func NewEngine(opts EngineOpts) *Engine {
 	if opts.Logger == nil {
-		opts.Logger = log.NewNopLogger()
+		opts.Logger = promslog.NewNopLogger()
 	}
 
 	queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{
@@ -394,7 +411,7 @@ func NewEngine(opts EngineOpts) *Engine {
 	if opts.LookbackDelta == 0 {
 		opts.LookbackDelta = defaultLookbackDelta
 		if l := opts.Logger; l != nil {
-			level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta)
+			l.Debug("Lookback delta is zero, setting to default value", "value", defaultLookbackDelta)
 		}
 	}
 
@@ -420,9 +437,25 @@ func NewEngine(opts EngineOpts) *Engine {
 		enableAtModifier:         opts.EnableAtModifier,
 		enableNegativeOffset:     opts.EnableNegativeOffset,
 		enablePerStepStats:       opts.EnablePerStepStats,
+		enableDelayedNameRemoval: opts.EnableDelayedNameRemoval,
+		enableTypeAndUnitLabels:  opts.EnableTypeAndUnitLabels,
 	}
 }
 
+// Close closes ng.
+// Callers must ensure the engine is really no longer in use before calling this,
+// to avoid failures like the one in https://github.com/prometheus/prometheus/issues/15232
+func (ng *Engine) Close() error {
+	if ng == nil {
+		return nil
+	}
+
+	if ng.activeQueryTracker != nil {
+		return ng.activeQueryTracker.Close()
+	}
+	return nil
+}
+
 // SetQueryLogger sets the query logger.
 func (ng *Engine) SetQueryLogger(l QueryLogger) {
 	ng.queryLoggerLock.Lock()
@@ -433,7 +466,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
 		// not make reload fail; only log a warning.
 		err := ng.queryLogger.Close()
 		if err != nil {
-			level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err)
+			ng.logger.Warn("Error while closing the previous query log file", "err", err)
 		}
 	}
 
@@ -448,7 +481,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
 // NewInstantQuery returns an evaluation query for the given expression at the given time.
func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) { - pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) + pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0*time.Second) finishQueue, err := ng.queueActive(ctx, qry) if err != nil { return nil, err @@ -461,9 +494,9 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts if err := ng.validateOpts(expr); err != nil { return nil, err } - *pExpr = PreprocessExpr(expr, ts, ts) + *pExpr, err = PreprocessExpr(expr, ts, ts, 0) - return qry, nil + return qry, err } // NewRangeQuery returns an evaluation query for the given time range and with @@ -485,9 +518,9 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - *pExpr = PreprocessExpr(expr, start, end) + *pExpr, err = PreprocessExpr(expr, start, end, interval) - return qry, nil + return qry, err } func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { @@ -530,7 +563,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { var atModifierUsed, negativeOffsetUsed bool var validationErr error - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END { @@ -573,7 +606,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { return validationErr } -// NewTestQuery: inject special behaviour into Query for testing. +// NewTestQuery injects special behaviour into Query for testing. func (ng *Engine) NewTestQuery(f func(context.Context) error) Query { qry := &query{ q: "test statement", @@ -602,6 +635,9 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota defer func() { ng.queryLoggerLock.RLock() if l := ng.queryLogger; l != nil { + logger := slog.New(l) + f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append. + params := make(map[string]interface{}, 4) params["query"] = q.q if eq, ok := q.Statement().(*parser.EvalStmt); ok { @@ -610,23 +646,23 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. 
params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - f := []interface{}{"params", params} + f = append(f, slog.Any("params", params)) if err != nil { - f = append(f, "error", err) + f = append(f, slog.Any("error", err)) } - f = append(f, "stats", stats.NewQueryStats(q.Stats())) + f = append(f, slog.Any("stats", stats.NewQueryStats(q.Stats()))) if span := trace.SpanFromContext(ctx); span != nil { - f = append(f, "spanID", span.SpanContext().SpanID()) + f = append(f, slog.Any("spanID", span.SpanContext().SpanID())) } if origin := ctx.Value(QueryOrigin{}); origin != nil { for k, v := range origin.(map[string]interface{}) { - f = append(f, k, v) + f = append(f, slog.Any(k, v)) } } - if err := l.Log(f...); err != nil { - ng.metrics.queryLogFailures.Inc() - level.Error(ng.logger).Log("msg", "can't log query", "err", err) - } + logger.LogAttrs(context.Background(), slog.LevelInfo, "promql query logged", f...) + // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? + // ng.metrics.queryLogFailures.Inc() + // ng.logger.Error("can't log query", "err", err) } ng.queryLoggerLock.RUnlock() }() @@ -700,22 +736,24 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr) evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval) // Instant evaluation. This is executed as a range evaluation with one step. - if s.Start == s.End && s.Interval == 0 { + if s.Start.Equal(s.End) && s.Interval == 0 { start := timeMilliseconds(s.Start) evaluator := &evaluator{ startTimestamp: start, endTimestamp: start, interval: 1, - ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, lookbackDelta: s.LookbackDelta, samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ng.enableDelayedNameRemoval, + enableTypeAndUnitLabels: ng.enableTypeAndUnitLabels, + querier: querier, } query.sampleStats.InitStepTracking(start, start, 1) - val, warnings, err := evaluator.Eval(s.Expr) + val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) evalSpanTimer.Finish() @@ -743,9 +781,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. 
 			if len(s.Histograms) > 0 {
-				vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start}
+				vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start, DropName: s.DropName}
 			} else {
-				vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start}
+				vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start, DropName: s.DropName}
 			}
 		}
 		return vector, warnings, nil
@@ -764,15 +802,17 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 		startTimestamp:           timeMilliseconds(s.Start),
 		endTimestamp:             timeMilliseconds(s.End),
 		interval:                 durationMilliseconds(s.Interval),
-		ctx:                      ctxInnerEval,
 		maxSamples:               ng.maxSamplesPerQuery,
 		logger:                   ng.logger,
 		lookbackDelta:            s.LookbackDelta,
 		samplesStats:             query.sampleStats,
 		noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
+		enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
+		enableTypeAndUnitLabels:  ng.enableTypeAndUnitLabels,
+		querier:                  querier,
 	}
 	query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
-	val, warnings, err := evaluator.Eval(s.Expr)
+	val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)
 
 	evalSpanTimer.Finish()
 
@@ -887,11 +927,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
 	}
 
 	if evalRange == 0 {
-		start -= durationMilliseconds(s.LookbackDelta)
+		// Reduce the start by one fewer ms than the lookback delta
+		// because we want to exclude samples that are precisely the
+		// lookback delta before the eval time.
+		start -= durationMilliseconds(s.LookbackDelta) - 1
 	} else {
-		// For all matrix queries we want to ensure that we have (end-start) + range selected
-		// this way we have `range` data before the start time
-		start -= durationMilliseconds(evalRange)
+		// For all matrix queries we want to ensure that we have
+		// (end-start) + range selected; this way we have `range` data
+		// before the start time. We subtract one from the range to
+		// exclude samples positioned directly at the lower boundary of
+		// the range.
+		start -= durationMilliseconds(evalRange) - 1
 	}
 
 	offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
@@ -976,6 +1022,8 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) {
 	return false, nil
 }
 
+// checkAndExpandSeriesSet expands expr's UnexpandedSeriesSet into expr's Series.
+// If the Series field is already non-nil, it's a no-op.
 func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) {
 	switch e := expr.(type) {
 	case *parser.MatrixSelector:
@@ -984,6 +1032,8 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations
 		if e.Series != nil {
 			return nil, nil
 		}
+		span := trace.SpanFromContext(ctx)
+		span.AddEvent("expand start", trace.WithAttributes(attribute.String("selector", e.String())))
 		series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet)
 		if e.SkipHistogramBuckets {
 			for i := range series {
@@ -991,6 +1041,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations
 			}
 		}
 		e.Series = series
+		span.AddEvent("expand end", trace.WithAttributes(attribute.Int("num_series", len(series))))
 		return ws, err
 	}
 	return nil, nil
@@ -1020,18 +1071,19 @@ func (e errWithWarnings) Error() string { return e.err.Error() }
 // querier and reports errors. On timeout or cancellation of its context it
 // terminates.
 type evaluator struct {
-	ctx context.Context
-
 	startTimestamp int64 // Start time in milliseconds.
 	endTimestamp   int64 // End time in milliseconds.
interval int64 // Interval in milliseconds. maxSamples int currentSamples int - logger log.Logger + logger *slog.Logger lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 + enableDelayedNameRemoval bool + enableTypeAndUnitLabels bool + querier storage.Querier } // errorf causes a panic with the input formatted into an error. @@ -1057,7 +1109,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + ev.logger.Error("runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err @@ -1069,10 +1121,13 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { +func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) - v, ws = ev.eval(expr) + v, ws = ev.eval(ctx, expr) + if ev.enableDelayedNameRemoval { + ev.cleanupMetricLabels(v) + } return v, ws, nil } @@ -1090,8 +1145,9 @@ type EvalNodeHelper struct { Out Vector // Caches. - // funcHistogramQuantile for classic histograms. + // funcHistogramQuantile and funcHistogramFraction for classic histograms. signatureToMetricWithBuckets map[string]*metricWithBuckets + nativeHistogramSamples []Sample lb *labels.Builder lblBuf []byte @@ -1101,6 +1157,9 @@ type EvalNodeHelper struct { rightSigs map[string]Sample matchedSigs map[string]map[uint64]struct{} resultMetric map[string]labels.Labels + + // Additional options for the evaluation. + enableDelayedNameRemoval bool } func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { @@ -1111,13 +1170,70 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { } } +// resetHistograms prepares the histogram caches by splitting the given vector into native and classic histograms. +func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annotations.Annotations { + var annos annotations.Annotations + + if enh.signatureToMetricWithBuckets == nil { + enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{} + } else { + for _, v := range enh.signatureToMetricWithBuckets { + v.buckets = v.buckets[:0] + } + } + enh.nativeHistogramSamples = enh.nativeHistogramSamples[:0] + + for _, sample := range inVec { + // We are only looking for classic buckets here. Remember + // the histograms for later treatment. + if sample.H != nil { + enh.nativeHistogramSamples = append(enh.nativeHistogramSamples, sample) + continue + } + + upperBound, err := strconv.ParseFloat( + sample.Metric.Get(model.BucketLabel), 64, + ) + if err != nil { + annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), arg.PositionRange())) + continue + } + enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) + mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)] + if !ok { + sample.Metric = labels.NewBuilder(sample.Metric). + Del(excludedLabels...). 
+ Labels() + mb = &metricWithBuckets{sample.Metric, nil} + enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb + } + mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F}) + } + + for idx, sample := range enh.nativeHistogramSamples { + // We have to reconstruct the exact same signature as above for + // a classic histogram, just ignoring any le label. + enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) + if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 { + // At this data point, we have classic histogram + // buckets and a native histogram with the same name and + // labels. Do not evaluate anything. + annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange())) + delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) + enh.nativeHistogramSamples[idx].H = nil + continue + } + } + return annos +} + // rangeEval evaluates the given expressions, and then for each step calls // the given funcCall with the values computed for each expression at that // step. The return value is the combination into time series of all the // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -1128,7 +1244,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) warnings.Merge(ws) matrixes[i] = val.(Matrix) @@ -1150,7 +1266,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) biggestLen = len(matrixes[i]) } } - enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)} + enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} type seriesAndTimestamp struct { Series ts int64 @@ -1180,45 +1296,24 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. ev.currentSamples = tempNumSamples // Gather input vectors for this timestamp. 
for i := range exprs { - vectors[i] = vectors[i][:0] - + var bh []EvalSeriesHelper + var sh []EvalSeriesHelper if prepSeries != nil { - bufHelpers[i] = bufHelpers[i][:0] - } - - for si, series := range matrixes[i] { - switch { - case len(series.Floats) > 0 && series.Floats[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts}) - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Floats = series.Floats[1:] - case len(series.Histograms) > 0 && series.Histograms[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts}) - matrixes[i][si].Histograms = series.Histograms[1:] - default: - continue - } - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - // Don't add histogram size here because we only - // copy the pointer above, not the whole - // histogram. - ev.currentSamples++ - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + bh = bufHelpers[i][:0] + sh = seriesHelpers[i] } + vectors[i], bh = ev.gatherVector(ts, matrixes[i], vectors[i], bh, sh) args[i] = vectors[i] - ev.samplesStats.UpdatePeak(ev.currentSamples) + if prepSeries != nil { + bufHelpers[i] = bh + } } // Make the function call. @@ -1240,15 +1335,15 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // If this could be an instant query, shortcut so as not to change sort order. if ev.endTimestamp == ev.startTimestamp { - if result.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && result.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } mat := make(Matrix, len(result)) for i, s := range result { if s.H == nil { - mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}, DropName: s.DropName} } else { - mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}, DropName: s.DropName} } } ev.currentSamples = originalNumSamples + mat.TotalSamples() @@ -1266,7 +1361,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } ss.ts = ts } else { - ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} + ss = seriesAndTimestamp{Series{Metric: sample.Metric, DropName: sample.DropName}, ts} } addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) seriess[h] = ss @@ -1290,7 +1385,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) return mat, warnings } -func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, params *fParams) (Matrix, annotations.Annotations) { // Keep a copy of the original point slice so that it can be returned to the pool. 
origMatrix := slices.Clone(inputMatrix) defer func() { @@ -1300,9 +1395,9 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping } }() - var warnings annotations.Annotations + var annos annotations.Annotations - enh := &EvalNodeHelper{} + enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval} tempNumSamples := ev.currentSamples // Create a mapping from input series to output groups. @@ -1330,47 +1425,44 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping } groups := make([]groupedAggregation, groupCount) - var k int - var ratio float64 var seriess map[uint64]Series + switch aggExpr.Op { case parser.TOPK, parser.BOTTOMK, parser.LIMITK: - if !convertibleToInt64(param) { - ev.errorf("Scalar value %v overflows int64", param) + // Return early if all k values are less than one. + if params.Max() < 1 { + return nil, annos } - k = int(param) - if k > len(inputMatrix) { - k = len(inputMatrix) - } - if k < 1 { - return nil, warnings - } - seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. + seriess = make(map[uint64]Series, len(inputMatrix)) + case parser.LIMIT_RATIO: - if math.IsNaN(param) { - ev.errorf("Ratio value %v is NaN", param) + // Return early if all r values are zero. + if params.Max() == 0 && params.Min() == 0 { + return nil, annos } - switch { - case param == 0: - return nil, warnings - case param < -1.0: - ratio = -1.0 - warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) - case param > 1.0: - ratio = 1.0 - warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) - default: - ratio = param + if params.Max() > 1.0 { + annos.Add(annotations.NewInvalidRatioWarning(params.Max(), 1.0, aggExpr.Param.PositionRange())) } - seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. + if params.Min() < -1.0 { + annos.Add(annotations.NewInvalidRatioWarning(params.Min(), -1.0, aggExpr.Param.PositionRange())) + } + seriess = make(map[uint64]Series, len(inputMatrix)) + case parser.QUANTILE: - if math.IsNaN(param) || param < 0 || param > 1 { - warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange())) + if params.HasAnyNaN() { + annos.Add(annotations.NewInvalidQuantileWarning(math.NaN(), aggExpr.Param.PositionRange())) + } + if params.Max() > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(params.Max(), aggExpr.Param.PositionRange())) + } + if params.Min() < 0 { + annos.Add(annotations.NewInvalidQuantileWarning(params.Min(), aggExpr.Param.PositionRange())) } } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + fParam := params.Next() + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. @@ -1381,17 +1473,17 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping var ws annotations.Annotations switch aggExpr.Op { case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: - result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess) + result, ws = ev.aggregationK(aggExpr, fParam, inputMatrix, seriesToResult, groups, enh, seriess) // If this could be an instant query, shortcut so as not to change sort order. 
-		if ev.endTimestamp == ev.startTimestamp {
-			warnings.Merge(ws)
-			return result, warnings
+		if ev.startTimestamp == ev.endTimestamp {
+			annos.Merge(ws)
+			return result, annos
 		}
 	default:
-		ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh)
+		ws = ev.aggregation(aggExpr, fParam, inputMatrix, result, seriesToResult, groups, enh)
 	}
 
-	warnings.Merge(ws)
+	annos.Merge(ws)
 
 	if ev.currentSamples > ev.maxSamples {
 		ev.error(ErrTooManySamples(env))
@@ -1416,17 +1508,90 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
 		}
 		result = result[:dst]
 	}
-	return result, warnings
+	return result, annos
+}
+
+// evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series, with the given offset applied.
+// For every storage.Series iterator in series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp,
+// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series.
+// All of the generated Series are collected into a Matrix that is then returned.
+func (ev *evaluator) evalSeries(ctx context.Context, series []storage.Series, offset time.Duration, recordOrigT bool) Matrix {
+	numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
+
+	mat := make(Matrix, 0, len(series))
+	var prevSS *Series
+	it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
+	var chkIter chunkenc.Iterator
+	for _, s := range series {
+		if err := contextDone(ctx, "expression evaluation"); err != nil {
+			ev.error(err)
+		}
+
+		chkIter = s.Iterator(chkIter)
+		it.Reset(chkIter)
+		ss := Series{
+			Metric: s.Labels(),
+		}
+
+		for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
+			step++
+			origT, f, h, ok := ev.vectorSelectorSingle(it, offset, ts)
+			if !ok {
+				continue
+			}
+
+			if h == nil {
+				ev.currentSamples++
+				ev.samplesStats.IncrementSamplesAtStep(step, 1)
+				if ev.currentSamples > ev.maxSamples {
+					ev.error(ErrTooManySamples(env))
+				}
+				if ss.Floats == nil {
+					ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps)
+				}
+				if recordOrigT {
+					// This is an info metric, where we want to track the original sample timestamp.
+					// Info metric values should be 1 by convention, therefore we can re-use this
+					// space in the sample.
+					f = float64(origT)
+				}
+				ss.Floats = append(ss.Floats, FPoint{F: f, T: ts})
+			} else {
+				if recordOrigT {
+					ev.error(fmt.Errorf("this should be an info metric, with float samples: %s", ss.Metric))
+				}
+
+				point := HPoint{H: h, T: ts}
+				histSize := point.size()
+				ev.currentSamples += histSize
+				ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize))
+				if ev.currentSamples > ev.maxSamples {
+					ev.error(ErrTooManySamples(env))
+				}
+				if ss.Histograms == nil {
+					ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps)
+				}
+				ss.Histograms = append(ss.Histograms, point)
+			}
+		}
+
+		if len(ss.Floats)+len(ss.Histograms) > 0 {
+			mat = append(mat, ss)
+			prevSS = &mat[len(mat)-1]
+		}
+	}
+	ev.samplesStats.UpdatePeak(ev.currentSamples)
+	return mat
 }
 
 // evalSubquery evaluates given SubqueryExpr and returns an equivalent
 // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
-func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { +func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() - val, ws := ev.eval(subq) - // But do incorporate the peak from the subquery + val, ws := ev.eval(ctx, subq) + // But do incorporate the peak from the subquery. samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats mat := val.(Matrix) @@ -1446,24 +1611,53 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele VectorSelector: vs, } for _, s := range mat { + // Set any "NotCounterReset" and "CounterReset" hints in native + // histograms to "UnknownCounterReset" because we might + // otherwise miss a counter reset happening in samples not + // returned by the subquery, or we might over-detect counter + // resets if the sample with a counter reset is returned + // multiple times by a high-res subquery. This intentionally + // does not attempt to be clever (like detecting if we are + // really missing underlying samples or returning underlying + // samples multiple times) because subqueries on counters are + // inherently problematic WRT counter reset handling, so we + // cannot really solve the problem for good. We only want to + // avoid problems that happen due to the explicitly set counter + // reset hints and go back to the behavior we already know from + // float samples. + for i, hp := range s.Histograms { + switch hp.H.CounterResetHint { + case histogram.NotCounterReset, histogram.CounterReset: + h := *hp.H // Shallow copy is sufficient, we only change CounterResetHint. + h.CounterResetHint = histogram.UnknownCounterReset + s.Histograms[i].H = &h + } + } vs.Series = append(vs.Series, NewStorageSeries(s)) } return ms, mat.TotalSamples(), ws } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { +func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. // Thus, we check for timeout/cancellation here. - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } + + if ev.endTimestamp < ev.startTimestamp { + return Matrix{}, nil + } + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 // Create a new span to help investigate inner evaluation performances. 
- ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) - ev.ctx = ctxWithSpan + ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) defer span.End() + if ss, ok := expr.(interface{ ShortString() string }); ok { + span.SetAttributes(attribute.String("operation", ss.ShortString())) + } switch e := expr.(type) { case *parser.AggregateExpr: @@ -1478,13 +1672,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio if e.Op == parser.COUNT_VALUES { valueLabel := param.(*parser.StringLiteral) if !model.LabelName(valueLabel.Val).IsValid() { - ev.errorf("invalid label name %q", valueLabel) + ev.errorf("invalid label name %s", valueLabel) } if !e.Without { sortedGrouping = append(sortedGrouping, valueLabel.Val) slices.Sort(sortedGrouping) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) }, e.Expr) } @@ -1492,18 +1686,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio var warnings annotations.Annotations originalNumSamples := ev.currentSamples // param is the number k for topk/bottomk, or q for quantile. - var fParam float64 - if param != nil { - val, ws := ev.eval(param) - warnings.Merge(ws) - fParam = val.(Matrix)[0].Floats[0].F - } + fp, ws := newFParams(ctx, ev, param) + warnings.Merge(ws) // Now fetch the data to be aggregated. - val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) warnings.Merge(ws) inputMatrix := val.(Matrix) - result, ws := ev.rangeEvalAgg(e, sortedGrouping, inputMatrix, fParam) + result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fp) warnings.Merge(ws) ev.currentSamples = originalNumSamples + result.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1521,7 +1711,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) + return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e) } } @@ -1545,7 +1735,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio matrixArgIndex = i matrixArg = true // Replacing parser.SubqueryExpr with parser.MatrixSelector. - val, totalSamples, ws := ev.evalSubquery(subq) + val, totalSamples, ws := ev.evalSubquery(ctx, subq) e.Args[i] = val warnings.Merge(ws) defer func() { @@ -1560,14 +1750,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // Special handling for functions that work on series not samples. switch e.Func.Name { case "label_replace": - return ev.evalLabelReplace(e.Args) + return ev.evalLabelReplace(ctx, e.Args) case "label_join": - return ev.evalLabelJoin(e.Args) + return ev.evalLabelJoin(ctx, e.Args) + case "info": + return ev.evalInfo(ctx, e.Args) } if !matrixArg { // Does not have a matrix argument. 
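// Illustration (not part of this patch): the count_values destination-label
// check above relies on model.LabelName's validity test from
// github.com/prometheus/common/model. In recent versions of that package the
// outcome can depend on the globally configured name validation scheme
// (legacy vs. UTF-8 names).
package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    for _, name := range []string{"value", "__value__", "0value", "va-lue"} {
        fmt.Printf("%q valid: %v\n", name, model.LabelName(name).IsValid())
    }
    // Under legacy validation, "0value" and "va-lue" are rejected, and the
    // engine raises "invalid label name" before evaluating the aggregation.
}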
- return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, annos := call(v, e.Args, enh) return vec, warnings.Merge(annos) }, e.Args...) @@ -1579,7 +1771,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio otherInArgs := make([]Vector, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] @@ -1593,7 +1785,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio sel := arg.(*parser.MatrixSelector) selVS := sel.VectorSelector.(*parser.VectorSelector) - ws, err := checkAndExpandSeriesSet(ev.ctx, sel) + ws, err := checkAndExpandSeriesSet(ctx, sel) warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) @@ -1611,12 +1803,19 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio var prevSS *Series inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix - enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} + enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) var chkIter chunkenc.Iterator + + // The last_over_time function acts like offset; thus, it + // should keep the metric name. For all the other range + // vector functions, the only change needed is to drop the + // metric name in the output. + dropName := e.Func.Name != "last_over_time" + for i, s := range selVS.Series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } ev.currentSamples -= len(floats) + totalHPointSize(histograms) @@ -1629,15 +1828,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio chkIter = s.Iterator(chkIter) it.Reset(chkIter) metric := selVS.Series[i].Labels() - // The last_over_time function acts like offset; thus, it - // should keep the metric name. For all the other range - // vector functions, the only change needed is to drop the - // metric name in the output. 
- if e.Func.Name != "last_over_time" { - metric = metric.DropMetricName() + if !ev.enableDelayedNameRemoval && dropName { + metric = metric.DropReserved(schema.IsMetadataLabel) } ss := Series{ - Metric: metric, + Metric: metric, + DropName: dropName, } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1700,14 +1896,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio ev.samplesStats.UpdatePeak(ev.currentSamples) if e.Func.Name == "rate" || e.Func.Name == "increase" { - samples := inMatrix[0] - metricName := samples.Metric.Get(labels.MetricName) - if metricName != "" && len(samples.Floats) > 0 && - !strings.HasSuffix(metricName, "_total") && - !strings.HasSuffix(metricName, "_sum") && - !strings.HasSuffix(metricName, "_count") && - !strings.HasSuffix(metricName, "_bucket") { - warnings.Add(annotations.NewPossibleNonCounterInfo(metricName, e.Args[0].PositionRange())) + metricName := inMatrix[0].Metric.Get(labels.MetricName) + if metricName != "" && len(ss.Floats) > 0 { + if ev.enableTypeAndUnitLabels { + // When type-and-unit-labels feature is enabled, check __type__ label + typeLabel := inMatrix[0].Metric.Get("__type__") + if typeLabel != string(model.MetricTypeCounter) { + warnings.Add(annotations.NewPossibleNonCounterLabelInfo(metricName, typeLabel, e.Args[0].PositionRange())) + } + } else if !strings.HasSuffix(metricName, "_total") && + !strings.HasSuffix(metricName, "_sum") && + !strings.HasSuffix(metricName, "_count") && + !strings.HasSuffix(metricName, "_bucket") { + // Fallback to name suffix checking + warnings.Add(annotations.NewPossibleNonCounterInfo(metricName, e.Args[0].PositionRange())) + } } } } @@ -1752,32 +1955,38 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio return Matrix{ Series{ - Metric: createLabelsForAbsentFunction(e.Args[0]), - Floats: newp, + Metric: createLabelsForAbsentFunction(e.Args[0]), + Floats: newp, + DropName: dropName, }, }, warnings } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } - return mat, warnings case *parser.ParenExpr: - return ev.eval(e.Expr) + return ev.eval(ctx, e.Expr) case *parser.UnaryExpr: - val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) mat := val.(Matrix) if e.Op == parser.SUB { for i := range mat { - mat[i].Metric = mat[i].Metric.DropMetricName() + if !ev.enableDelayedNameRemoval { + mat[i].Metric = mat[i].Metric.DropReserved(schema.IsMetadataLabel) + } + mat[i].DropName = true for j := range mat[i].Floats { mat[i].Floats[j].F = -mat[i].Floats[j].F } + for j := range mat[i].Histograms { + mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1) + } } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } } @@ -1786,7 +1995,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { val := scalarBinop(e.Op, 
v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) @@ -1799,120 +2008,75 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + 
span.SetAttributes(attribute.Float64("value", e.Val)) + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: + span.SetAttributes(attribute.String("value", e.Val)) return String{V: e.Val, T: ev.startTimestamp}, nil case *parser.VectorSelector: - ws, err := checkAndExpandSeriesSet(ev.ctx, e) + ws, err := checkAndExpandSeriesSet(ctx, e) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - mat := make(Matrix, 0, len(e.Series)) - var prevSS *Series - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range e.Series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { - ev.error(err) - } - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - ss := Series{ - Metric: e.Series[i].Labels(), - } - - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { - step++ - _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) - if ok { - if h == nil { - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtStep(step, 1) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Floats == nil { - ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) - } - ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) - } else { - point := HPoint{H: h, T: ts} - histSize := point.size() - ev.currentSamples += histSize - ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize)) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Histograms == nil { - ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) - } - ss.Histograms = append(ss.Histograms, point) - } - } - } - - if len(ss.Floats)+len(ss.Histograms) > 0 { - mat = append(mat, ss) - prevSS = &mat[len(mat)-1] - } - } - ev.samplesStats.UpdatePeak(ev.currentSamples) + mat := ev.evalSeries(ctx, e.Series, e.Offset, false) return mat, ws case *parser.MatrixSelector: if ev.startTimestamp != ev.endTimestamp { panic(errors.New("cannot do range evaluation of matrix selector")) } - return ev.matrixSelector(e) + return ev.matrixSelector(ctx, e) case *parser.SubqueryExpr: offsetMillis := durationMilliseconds(e.Offset) rangeMillis := durationMilliseconds(e.Range) newEv := &evaluator{ endTimestamp: ev.endTimestamp - offsetMillis, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, + enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, + querier: ev.querier, } if e.Step != 0 { @@ -1924,7 +2088,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // Start with the first timestamp after (ev.startTimestamp - offset - range) // that is aligned with the step (multiple of 'newEv.interval'). 
newEv.startTimestamp = newEv.interval * ((ev.startTimestamp - offsetMillis - rangeMillis) / newEv.interval) - if newEv.startTimestamp < (ev.startTimestamp - offsetMillis - rangeMillis) { + if newEv.startTimestamp <= (ev.startTimestamp - offsetMillis - rangeMillis) { newEv.startTimestamp += newEv.interval } @@ -1935,7 +2099,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio setOffsetForAtModifier(newEv.startTimestamp, e.Expr) } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples) @@ -1943,22 +2107,24 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.StepInvariantExpr: switch ce := e.Expr.(type) { case *parser.StringLiteral, *parser.NumberLiteral: - return ev.eval(ce) + return ev.eval(ctx, ce) } newEv := &evaluator{ startTimestamp: ev.startTimestamp, endTimestamp: ev.startTimestamp, // Always a single evaluation. interval: ev.interval, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, + enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, + querier: ev.querier, } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1981,7 +2147,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio } for i := range mat { if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { - panic(fmt.Errorf("unexpected number of samples")) + panic(errors.New("unexpected number of samples")) } for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval { if len(mat[i].Floats) > 0 { @@ -2034,8 +2200,8 @@ func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) { return getFPointSlice(numSteps) } -func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { - ws, err := checkAndExpandSeriesSet(ev.ctx, vs) +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Context, vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { + ws, err := checkAndExpandSeriesSet(ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2043,10 +2209,10 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) for i, s := range vs.Series { it := s.Iterator(nil) - seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is 
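// Illustration (not part of this patch): the start-alignment arithmetic at the
// top of this hunk, as a worked example in milliseconds. Changing < to <= makes
// the left boundary of the subquery range exclusive: a step lying exactly at
// start-offset-range is no longer included.
package main

import "fmt"

func alignedStart(start, offset, rng, interval int64) int64 {
    ts := interval * ((start - offset - rng) / interval)
    if ts <= start-offset-rng {
        ts += interval
    }
    return ts
}

func main() {
    // Instant query at t=100s, subquery range 30s, resolution 10s, no offset.
    fmt.Println(alignedStart(100_000, 0, 30_000, 10_000)) // 80000: first step strictly after 70000
    // If start-offset-range is already step-aligned, that step is now skipped.
    fmt.Println(alignedStart(100_000, 0, 20_000, 10_000)) // 90000 rather than 80000
}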
a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -2057,7 +2223,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec vec := make(Vector, 0, len(vs.Series)) for i, s := range vs.Series { it := seriesIterators[i] - t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + t, _, _, ok := ev.vectorSelectorSingle(it, vs.Offset, enh.Ts) if !ok { continue } @@ -2081,10 +2247,10 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec } // vectorSelectorSingle evaluates an instant vector for the iterator of one time series. -func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) ( +func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, offset time.Duration, ts int64) ( int64, float64, *histogram.FloatHistogram, bool, ) { - refTime := ts - durationMilliseconds(node.Offset) + refTime := ts - durationMilliseconds(offset) var t int64 var v float64 var h *histogram.FloatHistogram @@ -2105,7 +2271,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no if valueType == chunkenc.ValNone || t > refTime { var ok bool t, v, h, ok = it.PeekPrev() - if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { + if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } } @@ -2182,7 +2348,7 @@ func putMatrixSelectorHPointSlice(p []HPoint) { } // matrixSelector evaluates a *parser.MatrixSelector expression. -func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) { +func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSelector) (Matrix, annotations.Annotations) { var ( vs = node.VectorSelector.(*parser.VectorSelector) @@ -2193,7 +2359,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota it = storage.NewBuffer(durationMilliseconds(node.Range)) ) - ws, err := checkAndExpandSeriesSet(ev.ctx, node) + ws, err := checkAndExpandSeriesSet(ctx, node) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2201,7 +2367,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } chkIter = s.Iterator(chkIter) @@ -2239,20 +2405,20 @@ func (ev *evaluator) matrixIterSlice( mintFloats, mintHistograms := mint, mint // First floats... - if len(floats) > 0 && floats[len(floats)-1].T >= mint { + if len(floats) > 0 && floats[len(floats)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T <= mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. 
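// Illustration (not part of this patch): a simplified model of the
// vectorSelectorSingle lookback rule above; selectSample is a stand-in, not the
// engine function. The newest sample at or before refTime is chosen, but only
// if it is strictly newer than refTime-lookbackDelta; this hunk turned that
// boundary from inclusive to exclusive.
package main

import "fmt"

type sample struct {
    T int64 // Timestamp in milliseconds.
    F float64
}

func selectSample(samples []sample, refTime, lookbackMs int64) (sample, bool) {
    var best sample
    found := false
    for _, s := range samples {
        if s.T <= refTime && (!found || s.T > best.T) {
            best, found = s, true
        }
    }
    if !found || best.T <= refTime-lookbackMs {
        return sample{}, false // Outside the lookback window.
    }
    return best, true
}

func main() {
    samples := []sample{{T: 0, F: 1}, {T: 60_000, F: 2}}
    // Exactly lookbackDelta (5m) after the last sample: no longer selected.
    _, ok := selectSample(samples, 60_000+300_000, 300_000)
    fmt.Println(ok) // false
    _, ok = selectSample(samples, 60_000+299_999, 300_000)
    fmt.Println(ok) // true
}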
- mintFloats = floats[len(floats)-1].T + 1 + mintFloats = floats[len(floats)-1].T } else { ev.currentSamples -= len(floats) if floats != nil { @@ -2261,14 +2427,14 @@ func (ev *evaluator) matrixIterSlice( } // ...then the same for histograms. TODO(beorn7): Use generics? - if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + if len(histograms) > 0 && histograms[len(histograms)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T <= mint; drop++ { } // Rotate the buffer around the drop index so that points before mint can be // reused to store new histograms. @@ -2279,7 +2445,7 @@ func (ev *evaluator) matrixIterSlice( histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. - mintHistograms = histograms[len(histograms)-1].T + 1 + mintHistograms = histograms[len(histograms)-1].T } else { ev.currentSamples -= totalHPointSize(histograms) if histograms != nil { @@ -2287,6 +2453,11 @@ func (ev *evaluator) matrixIterSlice( } } + if mint == maxt { + // Empty range: return the empty slices. + return floats, histograms + } + soughtValueType := it.Seek(maxt) if soughtValueType == chunkenc.ValNone { if it.Err() != nil { @@ -2303,7 +2474,7 @@ loop: case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintHistograms { + if t > mintHistograms { if histograms == nil { histograms = getMatrixSelectorHPoints() } @@ -2329,7 +2500,7 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintFloats { + if t > mintFloats { ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -2356,6 +2527,11 @@ loop: } else { histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } + if histograms[n].H == nil { + // Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy. + // Not an issue in the loop above since that uses an intermediate buffer. + histograms[n].H = &histogram.FloatHistogram{} + } histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H) if value.IsStaleNaN(histograms[n].H.Sum) { histograms = histograms[:n] @@ -2459,7 +2635,7 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi } // VectorBinop evaluates a binary operation between two Vectors, excluding set operators. 
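// Illustration (not part of this patch): the float-buffer trimming in
// matrixIterSlice above, reduced to a standalone helper. On each range-query
// step the previous window's points are reused; with the boundary changes in
// this hunk, points at or before the new mint are dropped, making the range
// left-open.
package main

import "fmt"

type fpoint struct {
    T int64
    F float64
}

func trimBefore(points []fpoint, mint int64) []fpoint {
    if len(points) == 0 || points[len(points)-1].T <= mint {
        return points[:0] // No overlap with the new window; start fresh.
    }
    var drop int
    for drop = 0; points[drop].T <= mint; drop++ {
    }
    copy(points, points[drop:])
    return points[:len(points)-drop]
}

func main() {
    buf := []fpoint{{T: 10, F: 1}, {T: 20, F: 2}, {T: 30, F: 3}, {T: 40, F: 4}}
    // The sample exactly at mint=20 is dropped along with everything older.
    fmt.Println(trimBefore(buf, 20)) // [{30 3} {40 4}]
}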
-func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { if matching.Card == parser.CardManyToMany { panic("many-to-many only allowed for set operators") } @@ -2533,12 +2709,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * fl, fr = fr, fl hl, hr = hr, hl } - floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr) + floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr, pos) if err != nil { lastErr = err + continue } switch { case returnBool: + histogramValue = nil if keep { floatValue = 1.0 } else { @@ -2548,8 +2726,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) - if returnBool { - metric = metric.DropMetricName() + if !ev.enableDelayedNameRemoval && returnBool { + metric = metric.DropReserved(schema.IsMetadataLabel) } insertedSigs, exists := matchedSigs[sig] if matching.Card == parser.CardOneToOne { @@ -2573,9 +2751,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } enh.Out = append(enh.Out, Sample{ - Metric: metric, - F: floatValue, - H: histogramValue, + Metric: metric, + F: floatValue, + H: histogramValue, + DropName: returnBool, }) } return enh.Out, lastErr @@ -2615,8 +2794,9 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } str := string(enh.lblResultBuf) - if shouldDropMetricName(op) { - enh.lb.Del(labels.MetricName) + if changesMetricSchema(op) { + // Setting an empty Metadata deletes those labels if they exist. + schema.Metadata{}.SetToLabels(enh.lb) } if matching.Card == parser.CardOneToOne { @@ -2641,7 +2821,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. -func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { var lastErr error for _, lhsSample := range lhs { lf, rf := lhsSample.F, rhs.V @@ -2653,9 +2833,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lf, rf = rf, lf lh, rh = rh, lh } - float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh) + float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh, pos) if err != nil { lastErr = err + continue } // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS.
@@ -2674,8 +2855,11 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala if keep { lhsSample.F = float lhsSample.H = histogram - if shouldDropMetricName(op) || returnBool { - lhsSample.Metric = lhsSample.Metric.DropMetricName() + if changesMetricSchema(op) || returnBool { + if !ev.enableDelayedNameRemoval { + lhsSample.Metric = lhsSample.Metric.DropReserved(schema.IsMetadataLabel) + } + lhsSample.DropName = true } enh.Out = append(enh.Out, lhsSample) } @@ -2717,71 +2901,104 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. -func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - switch op { - case parser.ADD: - if hlhs != nil && hrhs != nil { - res, err := hlhs.Copy().Add(hrhs) - if err != nil { - return 0, nil, false, err +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (float64, *histogram.FloatHistogram, bool, error) { + opName := parser.ItemTypeStr[op] + switch { + case hlhs == nil && hrhs == nil: + { + switch op { + case parser.ADD: + return lhs + rhs, nil, true, nil + case parser.SUB: + return lhs - rhs, nil, true, nil + case parser.MUL: + return lhs * rhs, nil, true, nil + case parser.DIV: + return lhs / rhs, nil, true, nil + case parser.POW: + return math.Pow(lhs, rhs), nil, true, nil + case parser.MOD: + return math.Mod(lhs, rhs), nil, true, nil + case parser.EQLC: + return lhs, nil, lhs == rhs, nil + case parser.NEQ: + return lhs, nil, lhs != rhs, nil + case parser.GTR: + return lhs, nil, lhs > rhs, nil + case parser.LSS: + return lhs, nil, lhs < rhs, nil + case parser.GTE: + return lhs, nil, lhs >= rhs, nil + case parser.LTE: + return lhs, nil, lhs <= rhs, nil + case parser.ATAN2: + return math.Atan2(lhs, rhs), nil, true, nil } - return 0, res.Compact(0), true, nil } - return lhs + rhs, nil, true, nil - case parser.SUB: - if hlhs != nil && hrhs != nil { - res, err := hlhs.Copy().Sub(hrhs) - if err != nil { - return 0, nil, false, err + case hlhs == nil && hrhs != nil: + { + switch op { + case parser.MUL: + return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil + case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", opName, "histogram", pos) } - return 0, res.Compact(0), true, nil } - return lhs - rhs, nil, true, nil - case parser.MUL: - if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Mul(rhs), true, nil + case hlhs != nil && hrhs == nil: + { + switch op { + case parser.MUL: + return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil + case parser.DIV: + return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil + case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "float", pos) + } } - if hlhs == nil && hrhs != nil { - return 0, hrhs.Copy().Mul(lhs), true, nil + case hlhs != nil && hrhs != nil: + { + switch op { + case parser.ADD: + res, err := hlhs.Copy().Add(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil + case parser.SUB: + res, err := hlhs.Copy().Sub(hrhs) + if err != nil { + return 0, nil, 
false, err + } + return 0, res.Compact(0), true, nil + case parser.EQLC: + // This operation expects that both histograms are compacted. + return 0, hlhs, hlhs.Equals(hrhs), nil + case parser.NEQ: + // This operation expects that both histograms are compacted. + return 0, hlhs, !hlhs.Equals(hrhs), nil + case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "histogram", pos) + } } - return lhs * rhs, nil, true, nil - case parser.DIV: - if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Div(rhs), true, nil - } - return lhs / rhs, nil, true, nil - case parser.POW: - return math.Pow(lhs, rhs), nil, true, nil - case parser.MOD: - return math.Mod(lhs, rhs), nil, true, nil - case parser.EQLC: - return lhs, nil, lhs == rhs, nil - case parser.NEQ: - return lhs, nil, lhs != rhs, nil - case parser.GTR: - return lhs, nil, lhs > rhs, nil - case parser.LSS: - return lhs, nil, lhs < rhs, nil - case parser.GTE: - return lhs, nil, lhs >= rhs, nil - case parser.LTE: - return lhs, nil, lhs <= rhs, nil - case parser.ATAN2: - return math.Atan2(lhs, rhs), nil, true, nil } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } type groupedAggregation struct { - seen bool // Was this output groups seen in the input at this timestamp. - hasFloat bool // Has at least 1 float64 sample aggregated. - hasHistogram bool // Has at least 1 histogram sample aggregated. - floatValue float64 - histogramValue *histogram.FloatHistogram - floatMean float64 // Mean, or "compensating value" for Kahan summation. - groupCount int - groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group - heap vectorByValueHeap + floatValue float64 + histogramValue *histogram.FloatHistogram + floatMean float64 + floatKahanC float64 // "Compensating value" for Kahan summation. + groupCount float64 + heap vectorByValueHeap + + // All bools together for better packing within the struct. + seen bool // Was this output group seen in the input at this timestamp. + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets. + groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. + incrementalMean bool // True after reverting to incremental calculation of the mean value. } // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix. @@ -2805,15 +3022,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // Initialize this group if it's the first time we've seen it. if !group.seen { *group = groupedAggregation{ - seen: true, - floatValue: f, - groupCount: 1, + seen: true, + floatValue: f, + floatMean: f, + incompatibleHistograms: false, + groupCount: 1, } switch op { - case parser.AVG: - group.floatMean = f - fallthrough - case parser.SUM: + case parser.AVG, parser.SUM: if h == nil { group.hasFloat = true } else { @@ -2821,17 +3037,46 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: - group.floatMean = f - group.floatValue = 0 + switch { + case h != nil: + // Ignore histograms for STDVAR and STDDEV.
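// Illustration (not part of this patch): why groupedAggregation now groups its
// bool fields together. On 64-bit platforms, every bool that sits between
// 8-byte fields costs a full word of padding; packing the bools at the end lets
// them share one. Field names below are illustrative only.
package main

import (
    "fmt"
    "unsafe"
)

type interleaved struct {
    seen       bool
    floatValue float64
    hasFloat   bool
    floatMean  float64
    hasHist    bool
    groupCount float64
}

type packed struct {
    floatValue float64
    floatMean  float64
    groupCount float64
    seen       bool
    hasFloat   bool
    hasHist    bool
}

func main() {
    fmt.Println(unsafe.Sizeof(interleaved{})) // 48: each bool is padded to 8 bytes
    fmt.Println(unsafe.Sizeof(packed{}))      // 32: the three bools share one word
}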
+ group.seen = false + if op == parser.STDVAR { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange())) + } + case math.IsNaN(f), math.IsInf(f, 0): + group.floatValue = math.NaN() + default: + group.floatValue = 0 + } case parser.QUANTILE: + if h != nil { + group.seen = false + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange())) + } group.heap = make(vectorByValueHeap, 1) group.heap[0] = Sample{F: f} case parser.GROUP: group.floatValue = 1 + case parser.MIN, parser.MAX: + if h != nil { + group.seen = false + if op == parser.MIN { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) + } + } } continue } + if group.incompatibleHistograms { + continue + } + switch op { case parser.SUM: if h != nil { @@ -2840,6 +3085,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix _, err := group.histogramValue.Add(h) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true } } // Otherwise the aggregation contained floats @@ -2847,23 +3093,59 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. } else { group.hasFloat = true - group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean) + group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC) } case parser.AVG: + // For the average calculation of histograms, we use + // incremental mean calculation without the help of + // Kahan summation (but this should change, see + // https://github.com/prometheus/prometheus/issues/14105 + // ). For floats, we improve the accuracy with the help + // of Kahan summation. For a while, we assumed that + // incremental mean calculation combined with Kahan + // summation (see + // https://stackoverflow.com/questions/61665473/is-it-beneficial-for-precision-to-calculate-the-incremental-mean-average + // for inspiration) is generally the preferred solution. + // However, it then turned out that direct mean + // calculation (still in combination with Kahan + // summation) is often more accurate. See discussion in + // https://github.com/prometheus/prometheus/issues/16714 + // . The problem with the direct mean calculation is + // that it can overflow float64 for inputs on which the + // incremental mean calculation works just fine. Our + // current approach is therefore to use direct mean + // calculation as long as we do not overflow (or + // underflow) the running sum. Once the latter would + // happen, we switch to incremental mean calculation. + // This seems to work reasonably well, but note that a + // deeper understanding would be needed to find out if + // maybe an earlier switch to incremental mean + // calculation would be better in terms of accuracy. + // Also, we could apply a number of additional means to + // improve the accuracy, like processing the values in a + // particular order. For now, we decided that the + // current implementation is accurate enough for + // practical purposes, in particular given that changing + // the order of summation would be hard, given how the + // PromQL engine implements aggregations. 
group.groupCount++ if h != nil { group.hasHistogram = true if group.histogramValue != nil { - left := h.Copy().Div(float64(group.groupCount)) - right := group.histogramValue.Copy().Div(float64(group.groupCount)) + left := h.Copy().Div(group.groupCount) + right := group.histogramValue.Copy().Div(group.groupCount) toAdd, err := left.Sub(right) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue } _, err = group.histogramValue.Add(toAdd) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue } } // Otherwise the aggregation contained floats @@ -2871,36 +3153,47 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. } else { group.hasFloat = true - if math.IsInf(group.floatMean, 0) { - if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) { - // The `floatMean` and `s.F` values are `Inf` of the same sign. They - // can't be subtracted, but the value of `floatMean` is correct - // already. - break - } - if !math.IsInf(f, 0) && !math.IsNaN(f) { - // At this stage, the mean is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. + if !group.incrementalMean { + newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC) + if !math.IsInf(newV, 0) { + // The sum doesn't overflow, so we propagate it to the + // group struct and continue with the regular + // calculation of the mean value. + group.floatValue, group.floatKahanC = newV, newC break } + // If we are here, we know that the sum _would_ overflow. So + // instead of continue to sum up, we revert to incremental + // calculation of the mean value from here on. + group.incrementalMean = true + group.floatMean = group.floatValue / (group.groupCount - 1) + group.floatKahanC /= group.groupCount - 1 } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount) + q := (group.groupCount - 1) / group.groupCount + group.floatMean, group.floatKahanC = kahanSumInc( + f/group.groupCount, + q*group.floatMean, + q*group.floatKahanC, + ) } case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. case parser.MAX: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) + continue + } if group.floatValue < f || math.IsNaN(group.floatValue) { group.floatValue = f } case parser.MIN: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) + continue + } if group.floatValue > f || math.IsNaN(group.floatValue) { group.floatValue = f } @@ -2912,11 +3205,21 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h == nil { // Ignore native histograms. 
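// Illustration (not part of this patch): the avg strategy the long comment
// above describes, restated for plain float64 slices. kahanSumInc is modeled on
// the engine's helper of the same name; avg mirrors the hunk's logic but is a
// sketch, not the engine code itself.
package main

import (
    "fmt"
    "math"
)

func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
    t := sum + inc
    switch {
    case math.IsInf(t, 0):
        c = 0
    case math.Abs(sum) >= math.Abs(inc):
        c += (sum - t) + inc
    default:
        c += (inc - t) + sum
    }
    return t, c
}

func avg(values []float64) float64 {
    var sum, c, mean, count float64
    incremental := false
    for _, f := range values {
        count++
        if !incremental {
            if newSum, newC := kahanSumInc(f, sum, c); !math.IsInf(newSum, 0) {
                sum, c = newSum, newC // Direct mean: keep summing while the sum fits.
                continue
            }
            // The running sum would overflow: switch to incremental mean.
            incremental = true
            mean, c = sum/(count-1), c/(count-1)
        }
        q := (count - 1) / count
        mean, c = kahanSumInc(f/count, q*mean, q*c)
    }
    if incremental {
        return mean + c
    }
    return sum/count + c/count
}

func main() {
    fmt.Println(avg([]float64{1, 2, 3, 4})) // 2.5
    // Finite result (~2e307) even though the plain sum overflows float64.
    fmt.Println(avg([]float64{1e308, 1e308, -1e308, -1e308, 1e308}))
}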
group.groupCount++ delta := f - group.floatMean - group.floatMean += delta / float64(group.groupCount) + group.floatMean += delta / group.groupCount group.floatValue += delta * (f - group.floatMean) + } else { + if op == parser.STDVAR { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange())) + } } case parser.QUANTILE: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange())) + continue + } group.heap = append(group.heap, Sample{F: f}) default: @@ -2938,20 +3241,25 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: aggr.histogramValue = aggr.histogramValue.Compact(0) - } else { - aggr.floatValue = aggr.floatMean + case aggr.incrementalMean: + aggr.floatValue = aggr.floatMean + aggr.floatKahanC + default: + aggr.floatValue = aggr.floatValue/aggr.groupCount + aggr.floatKahanC/aggr.groupCount } case parser.COUNT: - aggr.floatValue = float64(aggr.groupCount) + aggr.floatValue = aggr.groupCount case parser.STDVAR: - aggr.floatValue /= float64(aggr.groupCount) + aggr.floatValue /= aggr.groupCount case parser.STDDEV: - aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount) case parser.QUANTILE: aggr.floatValue = quantile(q, aggr.heap) @@ -2962,10 +3270,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: aggr.histogramValue.Compact(0) - } else { - aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term. + default: + aggr.floatValue += aggr.floatKahanC } default: // For other aggregations, we already have the right value. @@ -2973,6 +3284,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix ss := &outputMatrix[ri] addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps) + ss.DropName = inputMatrix[ri].DropName } return annos @@ -2983,7 +3295,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // seriesToResult maps inputMatrix indexes to groups indexes. // For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio. // For a range query, aggregates output in the seriess map. 
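// Illustration (not part of this patch): the stdvar/stddev update above is
// Welford's online algorithm. A standalone version with the same finalization
// as the engine (population variance, i.e. division by the count):
package main

import (
    "fmt"
    "math"
)

func stdvarStddev(values []float64) (stdvar, stddev float64) {
    var count, mean, m2 float64
    for _, f := range values {
        count++
        delta := f - mean
        mean += delta / count
        m2 += delta * (f - mean)
    }
    stdvar = m2 / count
    return stdvar, math.Sqrt(stdvar)
}

func main() {
    v, s := stdvarStddev([]float64{2, 4, 4, 4, 5, 5, 7, 9})
    fmt.Println(v, s) // 4 2
}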
-func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { +func (ev *evaluator) aggregationK(e *parser.AggregateExpr, fParam float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { op := e.Op var s Sample var annos annotations.Annotations @@ -2992,21 +3304,66 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inp for i := range groups { groups[i].seen = false } + // advanceRemainingSeries discards any values at the current timestamp `ts` + // for the remaining input series. In range queries, if these values are not + // consumed now, they will no longer be accessible in the next evaluation step. + advanceRemainingSeries := func(ts int64, startIdx int) { + for i := startIdx; i < len(inputMatrix); i++ { + _, _, _ = ev.nextValues(ts, &inputMatrix[i]) + } + } seriesLoop: for si := range inputMatrix { - f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) + f, h, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) if !ok { continue } - s = Sample{Metric: inputMatrix[si].Metric, F: f} + s = Sample{Metric: inputMatrix[si].Metric, F: f, H: h, DropName: inputMatrix[si].DropName} + + var k int64 + var r float64 + switch op { + case parser.TOPK, parser.BOTTOMK, parser.LIMITK: + if !convertibleToInt64(fParam) { + ev.errorf("Scalar value %v overflows int64", fParam) + } + k = int64(fParam) + if k > int64(len(inputMatrix)) { + k = int64(len(inputMatrix)) + } + if k < 1 { + if enh.Ts != ev.endTimestamp { + advanceRemainingSeries(enh.Ts, si+1) + } + return nil, annos + } + case parser.LIMIT_RATIO: + if math.IsNaN(fParam) { + ev.errorf("Ratio value %v is NaN", fParam) + } + switch { + case fParam == 0: + if enh.Ts != ev.endTimestamp { + advanceRemainingSeries(enh.Ts, si+1) + } + return nil, annos + case fParam < -1.0: + r = -1.0 + case fParam > 1.0: + r = 1.0 + default: + r = fParam + } + } group := &groups[seriesToResult[si]] // Initialize this group if it's the first time we've seen it. if !group.seen { // LIMIT_RATIO is a special case, as we may not add this very sample to the heap, // while we also don't know the final size of it. - if op == parser.LIMIT_RATIO { + switch op { + case parser.LIMIT_RATIO: *group = groupedAggregation{ seen: true, heap: make(vectorByValueHeap, 0), @@ -3014,12 +3371,34 @@ seriesLoop: if ratiosampler.AddRatioSample(r, &s) { heap.Push(&group.heap, &s) } - } else { + case parser.LIMITK: *group = groupedAggregation{ seen: true, heap: make(vectorByValueHeap, 1, k), } group.heap[0] = s + case parser.TOPK: + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 0, k), + } + if s.H != nil { + group.seen = false + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("topk", e.PosRange)) + } else { + heap.Push(&group.heap, &s) + } + case parser.BOTTOMK: + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 0, k), + } + if s.H != nil { + group.seen = false + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("bottomk", e.PosRange)) + } else { + heap.Push(&group.heap, &s) + } } continue } @@ -3028,7 +3407,10 @@ seriesLoop: case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. switch { - case len(group.heap) < k: + case s.H != nil: + // Ignore histogram sample and add info annotation. 
+ annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("topk", e.PosRange)) + case int64(len(group.heap)) < k: heap.Push(&group.heap, &s) case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is bigger than the previous smallest element - overwrite that. @@ -3041,7 +3423,10 @@ seriesLoop: case parser.BOTTOMK: // We build a heap of up to k elements, with the biggest element at heap[0]. switch { - case len(group.heap) < k: + case s.H != nil: + // Ignore histogram sample and add info annotation. + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("bottomk", e.PosRange)) + case int64(len(group.heap)) < k: heap.Push((*vectorByReverseValueHeap)(&group.heap), &s) case group.heap[0].F > s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is smaller than the previous biggest element - overwrite that. @@ -3052,16 +3437,20 @@ seriesLoop: } case parser.LIMITK: - if len(group.heap) < k { + if int64(len(group.heap)) < k { heap.Push(&group.heap, &s) } // LIMITK optimization: early break if we've added K elem to _every_ group, // especially useful for large timeseries where the user is exploring labels via e.g. // limitk(10, my_metric) - if !group.groupAggrComplete && len(group.heap) == k { + if !group.groupAggrComplete && int64(len(group.heap)) == k { group.groupAggrComplete = true groupsRemaining-- if groupsRemaining == 0 { + // Process other values in the series before breaking the loop in case of range query. + if enh.Ts != ev.endTimestamp { + advanceRemainingSeries(enh.Ts, si+1) + } break seriesLoop } } @@ -3083,18 +3472,22 @@ seriesLoop: mat = make(Matrix, 0, len(groups)) } - add := func(lbls labels.Labels, f float64) { + add := func(lbls labels.Labels, f float64, h *histogram.FloatHistogram, dropName bool) { // If this could be an instant query, add directly to the matrix so the result is in consistent order. if ev.endTimestamp == ev.startTimestamp { - mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}}) + if h != nil { + mat = append(mat, Series{Metric: lbls, Histograms: []HPoint{{T: enh.Ts, H: h}}, DropName: dropName}) + } else { + mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName}) + } } else { // Otherwise the results are added into seriess elements. hash := lbls.Hash() ss, ok := seriess[hash] if !ok { - ss = Series{Metric: lbls} + ss = Series{Metric: lbls, DropName: dropName} } - addToSeries(&ss, enh.Ts, f, nil, numSteps) + addToSeries(&ss, enh.Ts, f, h, numSteps) seriess[hash] = ss } } @@ -3109,7 +3502,7 @@ seriesLoop: sort.Sort(sort.Reverse(aggr.heap)) } for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.H, v.DropName) } case parser.BOTTOMK: @@ -3118,12 +3511,12 @@ seriesLoop: sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap))) } for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.H, v.DropName) } case parser.LIMITK, parser.LIMIT_RATIO: for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.H, v.DropName) } } } @@ -3131,7 +3524,7 @@ seriesLoop: return mat, annos } -// aggregationK evaluates count_values on vec. +// aggregationCountValues evaluates count_values on vec. // Outputs as many series per group as there are values in the input. 
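// Illustration (not part of this patch): topk above maintains a size-k min-heap
// whose smallest kept element sits at heap[0], so each new sample only has to
// displace the current minimum. A float-only reduction with the same NaN
// ordering:
package main

import (
    "container/heap"
    "fmt"
    "math"
    "sort"
)

type minHeap []float64

func (h minHeap) Len() int      { return len(h) }
func (h minHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h minHeap) Less(i, j int) bool {
    return h[i] < h[j] || (math.IsNaN(h[i]) && !math.IsNaN(h[j]))
}
func (h *minHeap) Push(x any) { *h = append(*h, x.(float64)) }
func (h *minHeap) Pop() any {
    old := *h
    x := old[len(old)-1]
    *h = old[:len(old)-1]
    return x
}

func topk(k int, values []float64) []float64 {
    h := make(minHeap, 0, k)
    for _, f := range values {
        switch {
        case h.Len() < k:
            heap.Push(&h, f)
        case h[0] < f || (math.IsNaN(h[0]) && !math.IsNaN(f)):
            h[0] = f        // Displace the smallest kept element...
            heap.Fix(&h, 0) // ...and restore the heap invariant.
        }
    }
    // Instant queries return topk results in descending order.
    sort.Sort(sort.Reverse(sort.Float64Slice(h)))
    return h
}

func main() {
    fmt.Println(topk(3, []float64{5, 1, math.NaN(), 9, 3, 7})) // [9 7 5]
}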
func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) { type groupCount struct { @@ -3143,7 +3536,11 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping [] var buf []byte for _, s := range vec { enh.resetBuilder(s.Metric) - enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + if s.H == nil { + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + } else { + enh.lb.Set(valueLabel, s.H.String()) + } metric := enh.lb.Labels() // Considering the count_values() @@ -3175,6 +3572,30 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping [] return enh.Out, nil } +func (ev *evaluator) cleanupMetricLabels(v parser.Value) { + if v.Type() == parser.ValueTypeMatrix { + mat := v.(Matrix) + for i := range mat { + if mat[i].DropName { + mat[i].Metric = mat[i].Metric.DropReserved(schema.IsMetadataLabel) + } + } + if mat.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } else if v.Type() == parser.ValueTypeVector { + vec := v.(Vector) + for i := range vec { + if vec[i].DropName { + vec[i].Metric = vec[i].Metric.DropReserved(schema.IsMetadataLabel) + } + } + if vec.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } +} + func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) { if h == nil { if ss.Floats == nil { @@ -3218,17 +3639,19 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat if err == nil { return nil } - metricName := "" + op := parser.ItemTypeStr[e.Op] pos := e.PositionRange() - if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { - return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) - } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { - return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + if errors.Is(err, annotations.PromQLInfo) || errors.Is(err, annotations.PromQLWarning) { + return annotations.New().Add(err) + } + // TODO(NeerajGartia21): Test the exact annotation output once the testing framework can do so. + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) || errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return annotations.New().Add(annotations.NewIncompatibleBucketLayoutInBinOpWarning(op, pos)) } return nil } -// groupingKey builds and returns the grouping key for the given metric and +// generateGroupingKey builds and returns the grouping key for the given metric and // grouping labels. func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) { if without { @@ -3266,9 +3689,9 @@ func btos(b bool) float64 { return 0 } -// shouldDropMetricName returns whether the metric name should be dropped in the -// result of the op operation. -func shouldDropMetricName(op parser.ItemType) bool { +// changesMetricSchema returns whether the op operation changes the semantic meaning or +// schema of the metric. +func changesMetricSchema(op parser.ItemType) bool { switch op { case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD, parser.ATAN2: return true @@ -3289,11 +3712,11 @@ func formatDate(t time.Time) string { // unwrapParenExpr does the AST equivalent of removing parentheses around a expression.
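// Illustration (not part of this patch): how count_values renders a float
// sample into the destination label value per the branch above. Format 'f' with
// precision -1 yields the shortest decimal that round-trips, never in exponent
// notation.
package main

import (
    "fmt"
    "strconv"
)

func main() {
    for _, f := range []float64{1, 0.5, 1e-9, 123456789} {
        fmt.Println(strconv.FormatFloat(f, 'f', -1, 64))
    }
    // Output: 1, 0.5, 0.000000001, 123456789
    // Histogram samples take the s.H.String() form instead.
}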
func unwrapParenExpr(e *parser.Expr) { for { - if p, ok := (*e).(*parser.ParenExpr); ok { - *e = p.Expr - } else { + p, ok := (*e).(*parser.ParenExpr) + if !ok { break } + *e = p.Expr } } @@ -3305,15 +3728,20 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr { } // PreprocessExpr wraps all possible step invariant parts of the given expression with -// StepInvariantExpr. It also resolves the preprocessors. -func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { +// StepInvariantExpr. It also resolves the preprocessors and evaluates duration expressions +// into their numeric values. +func PreprocessExpr(expr parser.Expr, start, end time.Time, step time.Duration) (parser.Expr, error) { detectHistogramStatsDecoding(expr) + if err := parser.Walk(&durationVisitor{step: step}, expr, nil); err != nil { + return nil, err + } + isStepInvariant := preprocessExprHelper(expr, start, end) if isStepInvariant { - return newStepInvariantExpr(expr) + return newStepInvariantExpr(expr), nil } - return expr + return expr, nil } // preprocessExprHelper wraps the child nodes of the expression @@ -3450,32 +3878,27 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) { // required for correctness. func detectHistogramStatsDecoding(expr parser.Expr) { parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { - if n, ok := node.(*parser.BinaryExpr); ok { - detectHistogramStatsDecoding(n.LHS) - detectHistogramStatsDecoding(n.RHS) - return fmt.Errorf("stop") - } - n, ok := (node).(*parser.VectorSelector) if !ok { return nil } - for _, p := range path { - call, ok := p.(*parser.Call) + for i := len(path) - 1; i > 0; i-- { // Walk backwards up the path. + call, ok := path[i].(*parser.Call) if !ok { continue } - if call.Func.Name == "histogram_count" || call.Func.Name == "histogram_sum" { + switch call.Func.Name { + case "histogram_count", "histogram_sum", "histogram_avg": n.SkipHistogramBuckets = true - break - } - if call.Func.Name == "histogram_quantile" || call.Func.Name == "histogram_fraction" { + case "histogram_quantile", "histogram_fraction": n.SkipHistogramBuckets = false - break + default: + continue } + break } - return fmt.Errorf("stop") + return errors.New("stop") }) } @@ -3485,14 +3908,14 @@ func makeInt64Pointer(val int64) *int64 { return valp } -// Add RatioSampler interface to allow unit-testing (previously: Randomizer). +// RatioSampler allows unit-testing (previously: Randomizer). type RatioSampler interface { // Return this sample "offset" between [0.0, 1.0] sampleOffset(ts int64, sample *Sample) float64 AddRatioSample(r float64, sample *Sample) bool } -// Use Hash(labels.String()) / maxUint64 as a "deterministic" +// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic" // value in [0.0, 1.0]. type HashRatioSampler struct{} @@ -3502,7 +3925,7 @@ func NewHashRatioSampler() *HashRatioSampler { return &HashRatioSampler{} } -func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 { +func (s *HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 { const ( float64MaxUint64 = float64(math.MaxUint64) ) @@ -3544,5 +3967,49 @@ func newHistogramStatsSeries(series storage.Series) *histogramStatsSeries { } func (s histogramStatsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + // Try to reuse the iterator if we can. 
+ if statsIterator, ok := it.(*HistogramStatsIterator); ok { + statsIterator.Reset(s.Series.Iterator(statsIterator.Iterator)) + return statsIterator + } + return NewHistogramStatsIterator(s.Series.Iterator(it)) } + +// gatherVector gathers a Vector for ts from the series in input. +// output is used as a buffer. +// If bufHelpers and seriesHelpers are provided, seriesHelpers[i] is appended to bufHelpers for every input index i. +// The gathered Vector and bufHelpers are returned. +func (ev *evaluator) gatherVector(ts int64, input Matrix, output Vector, bufHelpers, seriesHelpers []EvalSeriesHelper) (Vector, []EvalSeriesHelper) { + output = output[:0] + for i, series := range input { + switch { + case len(series.Floats) > 0 && series.Floats[0].T == ts: + s := series.Floats[0] + output = append(output, Sample{Metric: series.Metric, F: s.F, T: ts, DropName: series.DropName}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + input[i].Floats = series.Floats[1:] + case len(series.Histograms) > 0 && series.Histograms[0].T == ts: + s := series.Histograms[0] + output = append(output, Sample{Metric: series.Metric, H: s.H, T: ts, DropName: series.DropName}) + input[i].Histograms = series.Histograms[1:] + default: + continue + } + if len(seriesHelpers) > 0 { + bufHelpers = append(bufHelpers, seriesHelpers[i]) + } + + // Don't add histogram size here because we only + // copy the pointer above, not the whole + // histogram. + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + + return output, bufHelpers +} diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go index cb501b2fdf..f873b6fabd 100644 --- a/promql/engine_internal_test.go +++ b/promql/engine_internal_test.go @@ -14,10 +14,11 @@ package promql import ( + "bytes" "errors" "testing" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/promql/parser" @@ -25,11 +26,8 @@ import ( ) func TestRecoverEvaluatorRuntime(t *testing.T) { - var output []interface{} - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = append(output, keyvals...)
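The HistogramStatsIterator reuse at the start of the hunk above follows the usual chunkenc convention: if the iterator handed back by the caller is of our own concrete type, reset it in place instead of allocating a new wrapper. A stripped-down sketch of the pattern, with stand-in types rather than the real chunkenc.Iterator:

package main

import "fmt"

// iter is a stand-in for chunkenc.Iterator.
type iter interface{ Next() bool }

type baseIter struct{ n int }

func (b *baseIter) Next() bool { b.n++; return b.n <= 3 }

// statsIter stands in for HistogramStatsIterator: a wrapper around another iterator.
type statsIter struct{ inner iter }

func (s *statsIter) Next() bool    { return s.inner.Next() }
func (s *statsIter) Reset(it iter) { s.inner = it }

// wrap mirrors histogramStatsSeries.Iterator: when the caller passes back an
// iterator we created earlier, reset and reuse it instead of allocating.
func wrap(prev, fresh iter) iter {
	if s, ok := prev.(*statsIter); ok {
		s.Reset(fresh)
		return s
	}
	return &statsIter{inner: fresh}
}

func main() {
	first := wrap(nil, &baseIter{})
	second := wrap(first, &baseIter{})
	fmt.Println(first == second) // true: the wrapper was reused
}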
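gatherVector above also advances each matrix series past the point it consumes, so later evaluation steps never re-scan history, and it charges one sample per gathered point against the engine limit (a histogram counts as one because only its pointer is copied, as the comment in the hunk notes). A float-only sketch of the pop-front bookkeeping:

package main

import "fmt"

type fpoint struct {
	T int64
	F float64
}

type series struct{ pts []fpoint }

// gather collects the point at timestamp ts from every series and pops it,
// mirroring how gatherVector slices input[i].Floats forward.
func gather(ts int64, input []series) []fpoint {
	out := make([]fpoint, 0, len(input))
	for i := range input {
		if len(input[i].pts) > 0 && input[i].pts[0].T == ts {
			out = append(out, input[i].pts[0])
			input[i].pts = input[i].pts[1:] // consumed points are never revisited
		}
	}
	return out
}

func main() {
	in := []series{{pts: []fpoint{{T: 10, F: 1}, {T: 20, F: 2}}}}
	fmt.Println(gather(10, in)) // [{10 1}]
	fmt.Println(gather(20, in)) // [{20 2}]
}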
- return nil - })) + var output bytes.Buffer + logger := promslog.New(&promslog.Config{Writer: &output}) ev := &evaluator{logger: logger} expr, _ := parser.ParseExpr("sum(up)") @@ -38,7 +36,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) { defer func() { require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") - require.Contains(t, output, "sum(up)") + require.Contains(t, output.String(), "sum(up)") }() defer ev.recover(expr, nil, &err) @@ -48,7 +46,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) { } func TestRecoverEvaluatorError(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} + ev := &evaluator{logger: promslog.NewNopLogger()} var err error e := errors.New("custom error") @@ -62,7 +60,7 @@ func TestRecoverEvaluatorError(t *testing.T) { } func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} + ev := &evaluator{logger: promslog.NewNopLogger()} var err error var ws annotations.Annotations diff --git a/promql/engine_test.go b/promql/engine_test.go index 523c0613df..ce5ef6efd7 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -19,26 +19,29 @@ import ( "fmt" "math" "os" + "path/filepath" "sort" - "strconv" + "strings" "sync" "testing" "time" "github.com/stretchr/testify/require" - "go.uber.org/goleak" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/stats" - "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -51,20 +54,13 @@ const ( func TestMain(m *testing.M) { // Enable experimental functions testing parser.EnableExperimentalFunctions = true - goleak.VerifyTestMain(m) + testutil.TolerantVerifyLeak(m) } func TestQueryConcurrency(t *testing.T) { maxConcurrency := 10 - dir, err := os.MkdirTemp("", "test_concurrency") - require.NoError(t, err) - defer os.RemoveAll(dir) - queryTracker := promql.NewActiveQueryTracker(dir, maxConcurrency, nil) - t.Cleanup(func() { - require.NoError(t, queryTracker.Close()) - }) - + queryTracker := promql.NewActiveQueryTracker(t.TempDir(), maxConcurrency, nil) opts := promql.EngineOpts{ Logger: nil, Reg: nil, @@ -72,15 +68,17 @@ func TestQueryConcurrency(t *testing.T) { Timeout: 100 * time.Second, ActiveQueryTracker: queryTracker, } + engine := promqltest.NewTestEngineWithOpts(t, opts) - engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) - defer cancelCtx() + t.Cleanup(cancelCtx) block := make(chan struct{}) processing := make(chan struct{}) done := make(chan int) - defer close(done) + t.Cleanup(func() { + close(done) + }) f := func(context.Context) error { select { @@ -165,7 +163,7 @@ func TestQueryTimeout(t *testing.T) { MaxSamples: 10, Timeout: 5 * time.Millisecond, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer 
cancelCtx() @@ -190,7 +188,7 @@ func TestQueryCancel(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -264,9 +262,9 @@ func TestQueryError(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) errStorage := promql.ErrStorage{errors.New("storage error")} - queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + queryable := storage.QueryableFunc(func(_, _ int64) (storage.Querier, error) { return &errQuerier{err: errStorage}, nil }) ctx, cancelCtx := context.WithCancel(context.Background()) @@ -329,276 +327,276 @@ func TestSelectHintsSetCorrectly(t *testing.T) { { query: "foo", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000}, + {Start: 5001, End: 10000}, }, }, { query: "foo @ 15", start: 10000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000}, + {Start: 10001, End: 15000}, }, }, { query: "foo @ 1", start: 10000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000}, + {Start: -3999, End: 1000}, }, }, { query: "foo[2m]", start: 200000, expected: []*storage.SelectHints{ - {Start: 80000, End: 200000, Range: 120000}, + {Start: 80001, End: 200000, Range: 120000}, }, }, { query: "foo[2m] @ 180", start: 200000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 300", start: 200000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000}, + {Start: 180001, End: 300000, Range: 120000}, }, }, { query: "foo[2m] @ 60", start: 200000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000}, + {Start: -59999, End: 60000, Range: 120000}, }, }, { query: "foo[2m] offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, + {Start: 60001, End: 180000, Range: 120000}, }, }, { query: "foo[2m] @ 200 offset 2m", start: 300000, expected: []*storage.SelectHints{ - {Start: -40000, End: 80000, Range: 120000}, + {Start: -39999, End: 80000, Range: 120000}, }, }, { query: "foo[2m:1s]", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Step: 1000}, + {Start: 175001, End: 300000, Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 165000, End: 
290000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 290000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 155000, End: 280000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 280000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "foo", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 5000, End: 20000, Step: 1000}, + {Start: 5001, End: 20000, Step: 1000}, }, }, { query: "foo @ 15", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: 10000, End: 15000, Step: 1000}, + {Start: 10001, End: 15000, Step: 1000}, }, }, { query: "foo @ 1", start: 10000, end: 20000, expected: []*storage.SelectHints{ - {Start: -4000, End: 1000, Step: 1000}, + {Start: -3999, End: 1000, Step: 1000}, }, }, { query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 180000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 180001, End: 300000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, + {Start: -59999, End: 60000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m])", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 80001, End: 500000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, + {Start: 60001, End: 380000, Range: 120000, Func: "rate", Step: 1000}, }, }, { query: "rate(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "rate", Step: 1000}, + {Start: 175001, End: 500000, Func: "rate", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 500000, Func: "count_over_time", Step: 
1000}, }, }, { query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, + {Start: 165001, End: 490000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + {Start: 175001, End: 300000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + {Start: 75001, End: 200000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + {Start: -24999, End: 100000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, + {Start: 155001, End: 480000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { // When the @ is on the vector selector, the enclosing subquery parameters // don't affect the hint ranges. 
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + {Start: 185001, End: 190000, Func: "count_over_time", Step: 1000}, }, }, { query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + {Start: -44999, End: 80000, Func: "count_over_time", Step: 1000}, }, }, { query: "sum by (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, }, }, { query: "sum without (dim1) (foo)", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, + {Start: 5001, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, }, }, { query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, expected: []*storage.SelectHints{ - {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, + {Start: 9001, End: 10000, Func: "avg_over_time", Range: 1000}, }, }, { query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, + {Start: 5001, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, }, }, { query: "(max by (dim1) (foo))[5s:1s]", start: 10000, expected: []*storage.SelectHints{ - {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, + {Start: 1, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000}, }, }, { query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, expected: []*storage.SelectHints{ - {Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000}, - {Start: 95000, End: 120000, Func: "max", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: "sum", By: true, Step: 5000}, + {Start: 95001, End: 120000, Func: "max", By: true, Step: 5000}, }, }, { query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 45001, End: 50000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, + {Start: 48001, End: 50000, Step: 1000, Func: "rate", Range: 2000}, + {Start: 245001, End: 250000, Step: 1000}, + {Start: 895001, End: 900000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - 
{Start: 95000, End: 500000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 95001, End: 500000, Step: 1000}, }, }, { query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, + {Start: 43001, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95001, End: 500000, Step: 1000}, + {Start: 655001, End: 780000, Step: 1000, Func: "rate"}, }, }, { // Hints are based on the inner most subquery timestamp. query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, expected: []*storage.SelectHints{ - {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: -149999, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, { // Hints are based on the inner most subquery timestamp. query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, expected: []*storage.SelectHints{ - {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, + {Start: 2800001, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000}, }, }, } { t.Run(tc.query, func(t *testing.T) { - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) hintsRecorder := &noopHintRecordingQueryable{} var ( @@ -629,7 +627,7 @@ func TestEngineShutdown(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) block := make(chan struct{}) @@ -756,7 +754,7 @@ load 10s Interval: 5 * time.Second, }, { - Query: `count_values("wrong label!", metric)`, + Query: `count_values("wrong label!\xff", metric)`, ShouldError: true, }, } @@ -765,7 +763,7 @@ load 10s t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) { var err error var qry promql.Query - engine := newTestEngine() + engine := newTestEngine(t) if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) } else { @@ -943,22 +941,20 @@ load 10s }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 10, - TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples - // as if we run a query on 00 looking back 60 seconds we will return 7 samples; - // see next test). + TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 24, }, }, { - Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", + Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 11, TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + // max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples. 
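The blanket +1 on the expected hint Start values, the [59s] to [60s] and [60s] to [61s] range bumps in these sample-count tests, and the 1ms shifts in the lookback-delta tests further below all encode the same boundary change: selection windows are now left-open, so a sample sitting exactly at the window's lower bound (or exactly lookback-delta old) no longer matches. A sketch of the membership rule implied by those numbers:

package main

import "fmt"

// inWindow is the boundary rule implied by the Start: 5000 -> 5001 hint
// changes: open at mint, closed at maxt.
func inWindow(sampleT, mint, maxt int64) bool {
	return sampleT > mint && sampleT <= maxt
}

func main() {
	fmt.Println(inWindow(5000, 5000, 10000)) // false: exactly at the old boundary
	fmt.Println(inWindow(5001, 5000, 10000)) // true
}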
TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 26, }, @@ -967,10 +963,9 @@ load 10s Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), PeakSamples: 78, - TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as - // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. + TotalSamples: 312, // (1 histogram (size 13) / 10 seconds * 60 seconds) * 4 TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 338, + 201000: 312, }, }, { @@ -1307,7 +1302,7 @@ load 10s for _, c := range cases { t.Run(c.Query, func(t *testing.T) { opts := promql.NewPrometheusQueryOpts(true, 0) - engine := promqltest.NewTestEngine(true, 0, promqltest.DefaultMaxSamplesPerQuery) + engine := promqltest.NewTestEngine(t, true, 0, promqltest.DefaultMaxSamplesPerQuery) runQuery := func(expErr error) *stats.Statistics { var err error @@ -1334,7 +1329,7 @@ load 10s if c.SkipMaxCheck { return } - engine = promqltest.NewTestEngine(true, 0, stats.Samples.PeakSamples-1) + engine = promqltest.NewTestEngine(t, true, 0, stats.Samples.PeakSamples-1) runQuery(promql.ErrTooManySamples(env)) }) } @@ -1435,23 +1430,23 @@ load 10s }, { // The peak samples in memory is during the first evaluation: - // - Subquery takes 22 samples, 11 for each bigmetric, - // - Result is calculated per series where the series samples is buffered, hence 11 more here. + // - Subquery takes 20 samples, 10 for each bigmetric. + // - Result is calculated per series where the series samples is buffered, hence 10 more here. // - The result of two series is added before the last series buffer is discarded, so 2 more here. - // Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series). + // Hence at peak it is 20 (subquery) + 10 (buffer of a series) + 2 (result from 2 series). // The subquery samples and the buffer is discarded before duplicating. Query: `rate(bigmetric[10s:1s] @ 10)`, - MaxSamples: 35, + MaxSamples: 32, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, }, { // Here the reasoning is same as above. But LHS and RHS are done one after another. - // So while one of them takes 35 samples at peak, we need to hold the 2 sample + // So while one of them takes 32 samples at peak, we need to hold the 2 sample // result of the other till then. Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 37, + MaxSamples: 34, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, @@ -1459,35 +1454,35 @@ load 10s { // promql.Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. - // Hence at peak it is 2*21 (subquery from 0s to 20s) - // + 11 (buffer of a series per evaluation) + // Hence at peak it is 2*20 (subquery from 0s to 20s) + // + 10 (buffer of a series per evaluation) // + 6 (result from 2 series at 3 eval times). Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 59, + MaxSamples: 56, Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, }, { // Nested subquery. - // We saw that innermost rate takes 35 samples which is still the peak + // We saw that innermost rate takes 32 samples which is still the peak // since the other two subqueries just duplicate the result. 
- Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, - MaxSamples: 35, + Query: `rate(rate(bigmetric[10:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, + MaxSamples: 32, Start: time.Unix(10, 0), }, { // Nested subquery. - // Now the outmost subquery produces more samples than inner most rate. + // Now the outermost subquery produces more samples than innermost rate. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`, - MaxSamples: 36, + MaxSamples: 34, Start: time.Unix(10, 0), }, } for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) testFunc := func(expError error) { var err error var qry promql.Query @@ -1508,18 +1503,18 @@ load 10s } // Within limit. - engine = promqltest.NewTestEngine(false, 0, c.MaxSamples) + engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples) testFunc(nil) // Exceeding limit. - engine = promqltest.NewTestEngine(false, 0, c.MaxSamples-1) + engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples-1) testFunc(promql.ErrTooManySamples(env)) }) } } func TestAtModifier(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := promqltest.LoadedStorage(t, ` load 10s metric{job="1"} 0+1x1000 @@ -1587,11 +1582,11 @@ load 1ms start: 10, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, + Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, promql.Series{ - Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, + Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1600,7 +1595,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, + Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1609,7 +1604,7 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, + Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, @@ -1618,13 +1613,26 @@ load 1ms start: 100, result: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, + Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, }, { query: "metric[100s:25s] @ 300", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, + Metric: lbls1, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, + Metric: lbls2, + }, + }, + }, { + query: "metric[100s1ms:25s] @ 300", // Add 1ms to the range to see the legacy behavior of the previous test. 
+ start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, @@ -1638,6 +1646,15 @@ load 1ms }, { query: "metric_neg[50s:25s] @ 0", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 26, T: -25000}, {F: 1, T: 0}}, + Metric: lblsneg, + }, + }, + }, { + query: "metric_neg[50s1ms:25s] @ 0", // Add 1ms to the range to see the legacy behavior of the previous test. + start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, @@ -1647,6 +1664,15 @@ load 1ms }, { query: "metric_neg[50s:25s] @ -100", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 126, T: -125000}, {F: 101, T: -100000}}, + Metric: lblsneg, + }, + }, + }, { + query: "metric_neg[50s1ms:25s] @ -100", // Add 1ms to the range to see the legacy behavior of the previous test. + start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, @@ -1654,7 +1680,7 @@ load 1ms }, }, }, { - query: `metric_ms[100ms:25ms] @ 2.345`, + query: `metric_ms[101ms:25ms] @ 2.345`, start: 100, result: promql.Matrix{ promql.Series{ @@ -1715,7 +1741,8 @@ load 1ms {F: 3600, T: 6 * 60 * 1000}, {F: 3600, T: 7 * 60 * 1000}, }, - Metric: labels.EmptyLabels(), + Metric: labels.EmptyLabels(), + DropName: true, }, }, }, @@ -1838,7 +1865,7 @@ func TestSubquerySelector(t *testing.T) { nil, promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1885,6 +1912,20 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { // Normal selector. Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, + }, + nil, + }, + Start: time.Unix(10020, 0), + }, + { // Normal selector. Add 1ms to the range to see the legacy behavior of the previous test. 
+ Query: `http_requests{group=~"pro.*",instance="0"}[30s1ms:10s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -1931,20 +1972,54 @@ func TestSubquerySelector(t *testing.T) { nil, promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + Floats: []promql.FPoint{{F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + Floats: []promql.FPoint{{F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + Floats: []promql.FPoint{{F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + Floats: []promql.FPoint{{F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, + }, + }, + nil, + }, + Start: time.Unix(8000, 0), + }, + { + Query: `rate(http_requests[1m])[15s1ms:5s]`, // Add 1ms to the range to see the legacy behavior of the previous test. 
+ Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, }, }, nil, @@ -1953,6 +2028,35 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(121, 0), // 1s later doesn't change the result. + }, + { + // Add 1ms to the range to see the legacy behavior of the previous test. + Query: `sum(http_requests{group=~"pro.*"})[30s1ms:10s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -1967,6 +2071,20 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests)[40s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + Query: `sum(http_requests)[40s1ms:10s]`, // Add 1ms to the range to see the legacy behavior of the previous test. Result: promql.Result{ nil, promql.Matrix{ @@ -1981,6 +2099,21 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + // Add 1ms to the range to see the legacy behavior of the previous test. 
+ Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s1ms:5s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -1997,7 +2130,7 @@ func TestSubquerySelector(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := promqltest.LoadedStorage(t, tst.loadString) t.Cleanup(func() { storage.Close() }) @@ -2017,26 +2150,17 @@ func TestSubquerySelector(t *testing.T) { } } -type FakeQueryLogger struct { - closed bool - logs []interface{} -} +func getLogLines(t *testing.T, name string) []string { + content, err := os.ReadFile(name) + require.NoError(t, err) -func NewFakeQueryLogger() *FakeQueryLogger { - return &FakeQueryLogger{ - closed: false, - logs: make([]interface{}, 0), + lines := strings.Split(string(content), "\n") + for i := len(lines) - 1; i >= 0; i-- { + if lines[i] == "" { + lines = append(lines[:i], lines[i+1:]...) + } } -} - -func (f *FakeQueryLogger) Close() error { - f.closed = true - return nil -} - -func (f *FakeQueryLogger) Log(l ...interface{}) error { - f.logs = append(f.logs, l...) - return nil + return lines } func TestQueryLogger_basic(t *testing.T) { @@ -2046,7 +2170,7 @@ func TestQueryLogger_basic(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) queryExec := func() { ctx, cancelCtx := context.WithCancel(context.Background()) @@ -2061,33 +2185,45 @@ func TestQueryLogger_basic(t *testing.T) { // promql.Query works without query log initialized. queryExec() - f1 := NewFakeQueryLogger() + tmpDir := t.TempDir() + ql1File := filepath.Join(tmpDir, "query1.log") + f1, err := logging.NewJSONFileLogger(ql1File) + require.NoError(t, err) + engine.SetQueryLogger(f1) queryExec() - for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}} { - require.Equal(t, field, f1.logs[i]) - } + logLines := getLogLines(t, ql1File) + require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"}) + require.Len(t, logLines, 1) - l := len(f1.logs) + l := len(logLines) queryExec() - require.Len(t, f1.logs, 2*l) + logLines = getLogLines(t, ql1File) + l2 := len(logLines) + require.Equal(t, l2, 2*l) - // Test that we close the query logger when unsetting it. - require.False(t, f1.closed, "expected f1 to be open, got closed") + // Test that we close the query logger when unsetting it. The following + // attempt to close the file should error. engine.SetQueryLogger(nil) - require.True(t, f1.closed, "expected f1 to be closed, got open") + err = f1.Close() + require.ErrorContains(t, err, "file already closed", "expected f1 to be closed, got open") queryExec() // Test that we close the query logger when swapping. 
- f2 := NewFakeQueryLogger() - f3 := NewFakeQueryLogger() + ql2File := filepath.Join(tmpDir, "query2.log") + f2, err := logging.NewJSONFileLogger(ql2File) + require.NoError(t, err) + ql3File := filepath.Join(tmpDir, "query3.log") + f3, err := logging.NewJSONFileLogger(ql3File) + require.NoError(t, err) engine.SetQueryLogger(f2) - require.False(t, f2.closed, "expected f2 to be open, got closed") queryExec() engine.SetQueryLogger(f3) - require.True(t, f2.closed, "expected f2 to be closed, got open") - require.False(t, f3.closed, "expected f3 to be open, got closed") + err = f2.Close() + require.ErrorContains(t, err, "file already closed", "expected f2 to be closed, got open") queryExec() + err = f3.Close() + require.NoError(t, err) } func TestQueryLogger_fields(t *testing.T) { @@ -2097,9 +2233,16 @@ func TestQueryLogger_fields(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) + + tmpDir := t.TempDir() + ql1File := filepath.Join(tmpDir, "query1.log") + f1, err := logging.NewJSONFileLogger(ql1File) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, f1.Close()) + }) - f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) @@ -2112,11 +2255,8 @@ func TestQueryLogger_fields(t *testing.T) { res := query.Exec(ctx) require.NoError(t, res.Err) - expected := []string{"foo", "bar"} - for i, field := range expected { - v := f1.logs[len(f1.logs)-len(expected)+i].(string) - require.Equal(t, field, v) - } + logLines := getLogLines(t, ql1File) + require.Contains(t, logLines[0], "foo", "bar") } func TestQueryLogger_error(t *testing.T) { @@ -2126,25 +2266,32 @@ func TestQueryLogger_error(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) + + tmpDir := t.TempDir() + ql1File := filepath.Join(tmpDir, "query1.log") + f1, err := logging.NewJSONFileLogger(ql1File) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, f1.Close()) + }) - f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() testErr := errors.New("failure") - query := engine.NewTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(_ context.Context) error { return testErr }) res := query.Exec(ctx) require.Error(t, res.Err, "query should have failed") - for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}, "error", testErr} { - require.Equal(t, f1.logs[i], field) - } + logLines := getLogLines(t, ql1File) + require.Contains(t, logLines[0], "error", testErr) + require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"}) } func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { @@ -2749,7 +2896,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, PosRange: posrange.PositionRange{ Start: 29, - End: 52, + End: 51, }, }, }, @@ -2941,7 +3088,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { t.Run(test.input, func(t *testing.T) { expr, err := parser.ParseExpr(test.input) require.NoError(t, err) - expr = promql.PreprocessExpr(expr, startTime, endTime) + expr, err = promql.PreprocessExpr(expr, startTime, endTime, 0) + require.NoError(t, err) if test.outputTest { require.Equal(t, 
test.input, expr.String(), "error on input '%s'", test.input) } @@ -3009,7 +3157,7 @@ func TestEngineOptsValidation(t *testing.T) { } for _, c := range cases { - eng := promql.NewEngine(c.opts) + eng := promqltest.NewTestEngineWithOpts(t, c.opts) _, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0)) _, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { @@ -3022,14 +3170,38 @@ func TestEngineOptsValidation(t *testing.T) { } } +func TestEngine_Close(t *testing.T) { + t.Run("nil engine", func(t *testing.T) { + var ng *promql.Engine + require.NoError(t, ng.Close()) + }) + + t.Run("non-nil engine", func(t *testing.T) { + ng := promql.NewEngine(promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 0, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: nil, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: false, + LookbackDelta: 0, + EnableDelayedNameRemoval: true, + }) + require.NoError(t, ng.Close()) + }) +} + func TestInstantQueryWithRangeVectorSelector(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) baseT := timestamp.Time(0) storage := promqltest.LoadedStorage(t, ` load 1m some_metric{env="1"} 0+1x4 some_metric{env="2"} 0+2x4 + some_metric{env="3"} {{count:0}}+{{count:1}}x4 some_metric_with_stale_marker 0 1 stale 3 `) t.Cleanup(func() { require.NoError(t, storage.Close()) }) @@ -3040,7 +3212,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { ts time.Time }{ "matches series with points in range": { - expr: "some_metric[1m]", + expr: "some_metric[2m]", ts: baseT.Add(2 * time.Minute), expected: promql.Matrix{ { @@ -3057,6 +3229,13 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4}, }, }, + { + Metric: labels.FromStrings("__name__", "some_metric", "env", "3"), + Histograms: []promql.HPoint{ + {T: timestamp.FromTime(baseT.Add(time.Minute)), H: &histogram.FloatHistogram{Count: 1, CounterResetHint: histogram.NotCounterReset}}, + {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), H: &histogram.FloatHistogram{Count: 2, CounterResetHint: histogram.NotCounterReset}}, + }, + }, }, }, "matches no series": { @@ -3076,7 +3255,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { { Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"), Floats: []promql.FPoint{ - {T: timestamp.FromTime(baseT), F: 0}, {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, {T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3}, }, @@ -3098,617 +3276,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { } } -func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
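The call-site change in TestPreprocessAndWrapWithStepInvariantExpr above reflects the new PreprocessExpr contract from engine.go: the query step is threaded in so duration expressions can be resolved to concrete values, and preprocessing can now fail. A minimal sketch of using the new signature; the expression, times, and step here are made up:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`sum(rate(http_requests_total[5m]))`)
	if err != nil {
		panic(err)
	}
	// New signature: the step is passed in (0 for an instant query) and an
	// error is returned alongside the rewritten expression.
	expr, err = promql.PreprocessExpr(expr, time.Unix(0, 0), time.Unix(600, 0), 15*time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Println(expr.String())
}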
- cases := []struct { - histograms []histogram.Histogram - expected histogram.FloatHistogram - expectedAvg histogram.FloatHistogram - }{ - { - histograms: []histogram.Histogram{ - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 25, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 4, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{1, 1, -1, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{2, 2, -3, 8}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 41, - Sum: 1111.1, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers. - }, - }, - expected: histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 0, - ZeroThreshold: 0.001, - ZeroCount: 14, - Count: 107, - Sum: 4691.2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 7}, - }, - PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 6}, - {Offset: 3, Length: 3}, - }, - NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, - }, - expectedAvg: histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 0, - ZeroThreshold: 0.001, - ZeroCount: 3.5, - Count: 26.75, - Sum: 1172.8, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 7}, - }, - PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 6}, - {Offset: 3, Length: 3}, - }, - NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - seriesNameOverTime := "sparse_histogram_series_over_time" - - engine := newTestEngine() - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42) - require.NoError(t, err) - for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) - // Since we mutate h later, we need to create a copy here. 
- var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - - lbls = labels.FromStrings("__name__", seriesNameOverTime) - newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) - // Since we mutate h later, we need to create a copy here. - if floatHisto { - _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) - } - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, ts int64, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - require.Empty(t, res.Warnings) - - vector, err := res.Vector() - require.NoError(t, err) - - testutil.RequireEqual(t, exp, vector) - } - queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - require.Equal(t, expWarnings, res.Warnings) - } - - // sum(). - queryString := fmt.Sprintf("sum(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - - queryString = `sum({idx="0"})` - var annos annotations.Annotations - annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13})) - queryAndCheckAnnotations(queryString, ts, annos) - - // + operator. - queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) - for idx := 1; idx < len(c.histograms); idx++ { - queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) - } - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - - // count(). - queryString = fmt.Sprintf("count(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) - - // avg(). - queryString = fmt.Sprintf("avg(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) - - offset := int64(len(c.histograms) - 1) - newTs := ts + offset*int64(time.Minute/time.Millisecond) - - // sum_over_time(). - queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) - - // avg_over_time(). - queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) - }) - idx0++ - } - } -} - -func TestNativeHistogram_SubOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- cases := []struct { - histograms []histogram.Histogram - expected histogram.FloatHistogram - }{ - { - histograms: []histogram.Histogram{ - { - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - Schema: 0, - Count: 11, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, -1}, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{3, -1}, - }, - }, - expected: histogram.FloatHistogram{ - Schema: 0, - Count: 30, - Sum: 1111.1, - ZeroThreshold: 0.001, - ZeroCount: 2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 4}, - }, - PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - {Offset: 1, Length: 1}, - {Offset: 4, Length: 3}, - }, - NegativeBuckets: []float64{1, 1, 7, 5, 5, 2}, - }, - }, - { - histograms: []histogram.Histogram{ - { - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - Schema: 1, - Count: 11, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, -1}, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{3, -1}, - }, - }, - expected: histogram.FloatHistogram{ - Schema: 0, - Count: 30, - Sum: 1111.1, - ZeroThreshold: 0.001, - ZeroCount: 2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 1}, - {Offset: 1, Length: 5}, - }, - PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 4, Length: 3}, - }, - NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2}, - }, - }, - { - histograms: []histogram.Histogram{ - { - Schema: 1, - Count: 11, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, -1}, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{3, -1}, - }, - { - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - }, - expected: histogram.FloatHistogram{ - Schema: 0, - Count: -30, - Sum: -1111.1, - ZeroThreshold: 0.001, - ZeroCount: -2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 1}, - {Offset: 1, Length: 5}, - }, - PositiveBuckets: []float64{-1, -1, -2, -1, -1, -1}, - NegativeSpans: []histogram.Span{ - 
{Offset: 1, Length: 4}, - {Offset: 4, Length: 3}, - }, - NegativeBuckets: []float64{2, -2, -2, -7, -5, -5, -2}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) - // Since we mutate h later, we need to create a copy here. - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - if len(vector) == len(exp) { - for i, e := range exp { - got := vector[i].H - if got != e.H { - // Error messages are better if we compare structs, not pointers. - require.Equal(t, *e.H, *got) - } - } - } - - testutil.RequireEqual(t, exp, vector) - } - - // - operator. - queryString := fmt.Sprintf(`%s{idx="0"}`, seriesName) - for idx := 1; idx < len(c.histograms); idx++ { - queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx) - } - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - }) - } - idx0++ - } -} - -func TestNativeHistogram_MulDivOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- originalHistogram := histogram.Histogram{ - Schema: 0, - Count: 21, - Sum: 33, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{3, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []int64{3, 0, 0}, - } - - cases := []struct { - scalar float64 - histogram histogram.Histogram - expectedMul histogram.FloatHistogram - expectedDiv histogram.FloatHistogram - }{ - { - scalar: 3, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 63, - Sum: 99, - ZeroThreshold: 0.001, - ZeroCount: 9, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{9, 9, 9}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{9, 9, 9}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: 7, - Sum: 11, - ZeroThreshold: 0.001, - ZeroCount: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{1, 1, 1}, - }, - }, - { - scalar: 0, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 0, - Sum: 0, - ZeroThreshold: 0.001, - ZeroCount: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{0, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{0, 0, 0}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: math.Inf(1), - Sum: math.Inf(1), - ZeroThreshold: 0.001, - ZeroCount: math.Inf(1), - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - floatSeriesName := "float_series" - - engine := newTestEngine() - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - h := c.histogram - lbls := labels.FromStrings("__name__", seriesName) - // Since we mutate h later, we need to create a copy here. - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - _, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar) - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - testutil.RequireEqual(t, exp, vector) - } - - // histogram * scalar. 
-			queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
-			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-			// scalar * histogram.
-			queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
-			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-			// histogram * float.
-			queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
-			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-			// float * histogram.
-			queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
-			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-			// histogram / scalar.
-			queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
-			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
-
-			// histogram / float.
-			queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
-			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
-		})
-		idx0++
-	}
-	}
-}
-
 func TestQueryLookbackDelta(t *testing.T) {
 	var (
 		load = `load 5m
@@ -3726,43 +3293,43 @@ metric 0 1 2
 	}{
 		{
 			name:          "default lookback delta",
-			ts:            lastDatapointTs.Add(defaultLookbackDelta),
+			ts:            lastDatapointTs.Add(defaultLookbackDelta - time.Millisecond),
 			expectSamples: true,
 		},
 		{
 			name:          "outside default lookback delta",
-			ts:            lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond),
+			ts:            lastDatapointTs.Add(defaultLookbackDelta),
 			expectSamples: false,
 		},
 		{
 			name:           "custom engine lookback delta",
-			ts:             lastDatapointTs.Add(10 * time.Minute),
+			ts:             lastDatapointTs.Add(10*time.Minute - time.Millisecond),
 			engineLookback: 10 * time.Minute,
 			expectSamples:  true,
 		},
 		{
 			name:           "outside custom engine lookback delta",
-			ts:             lastDatapointTs.Add(10*time.Minute + time.Millisecond),
+			ts:             lastDatapointTs.Add(10 * time.Minute),
 			engineLookback: 10 * time.Minute,
 			expectSamples:  false,
 		},
 		{
 			name:           "custom query lookback delta",
-			ts:             lastDatapointTs.Add(20 * time.Minute),
+			ts:             lastDatapointTs.Add(20*time.Minute - time.Millisecond),
 			engineLookback: 10 * time.Minute,
 			queryLookback:  20 * time.Minute,
 			expectSamples:  true,
 		},
 		{
 			name:           "outside custom query lookback delta",
-			ts:             lastDatapointTs.Add(20*time.Minute + time.Millisecond),
+			ts:             lastDatapointTs.Add(20 * time.Minute),
 			engineLookback: 10 * time.Minute,
 			queryLookback:  20 * time.Minute,
 			expectSamples:  false,
 		},
 		{
 			name:           "negative custom query lookback delta",
-			ts:             lastDatapointTs.Add(20 * time.Minute),
+			ts:             lastDatapointTs.Add(20*time.Minute - time.Millisecond),
 			engineLookback: -10 * time.Minute,
 			queryLookback:  20 * time.Minute,
 			expectSamples:  true,
@@ -3772,7 +3339,7 @@ metric 0 1 2
 	for _, c := range cases {
 		c := c
 		t.Run(c.name, func(t *testing.T) {
-			engine := promqltest.NewTestEngine(false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
+			engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
 			storage := promqltest.LoadedStorage(t, load)
 			t.Cleanup(func() { storage.Close() })
 
@@ -3798,3 +3365,623 @@ func makeInt64Pointer(val int64) *int64 {
 	*valp = val
 	return valp
 }
+
+func TestHistogramCopyFromIteratorRegression(t *testing.T) {
+	// Loading the following histograms creates two chunks because there's a
+	// counter reset. Not only is the counter lower in the last histogram,
+	// but buckets are also missing.
+	// This in turn means that chunk iterators will have different spans.
+	load := `load 1m
+histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}}
+`
+	storage := promqltest.LoadedStorage(t, load)
+	t.Cleanup(func() { storage.Close() })
+	engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
+
+	verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) {
+		res := qry.Exec(context.Background())
+		require.NoError(t, res.Err)
+
+		m, ok := res.Value.(promql.Matrix)
+		require.True(t, ok)
+
+		require.Len(t, m, 1)
+		series := m[0]
+
+		require.Empty(t, series.Floats)
+		require.Len(t, series.Histograms, len(expected))
+		for i, e := range expected {
+			series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care.
+			require.Equal(t, &e, series.Histograms[i].H)
+		}
+	}
+
+	qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[90s])", time.Unix(0, 0), time.Unix(0, 0).Add(60*time.Second), time.Minute)
+	require.NoError(t, err)
+	verify(t, qry, []histogram.FloatHistogram{
+		{
+			Count:           3,                                         // Increase from 4 to 6 is 2. Interpolation adds 1.
+			Sum:             3,
+			PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
+			PositiveBuckets: []float64{1.5, 1.5},                      // Increase from 2 to 3 is 1 in both buckets. Interpolation adds 0.5.
+		},
+	})
+
+	qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[61s]", time.Unix(0, 0).Add(2*time.Minute))
+	require.NoError(t, err)
+	verify(t, qry, []histogram.FloatHistogram{
+		{
+			Count:           6,
+			Sum:             6,
+			PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
+			PositiveBuckets: []float64{3, 3},
+		},
+		{
+			Count:           1,
+			Sum:             1,
+			PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+			PositiveBuckets: []float64{1},
+		},
+	})
+}
+
+func TestRateAnnotations(t *testing.T) {
+	testCases := map[string]struct {
+		data                       string
+		expr                       string
+		typeAndUnitLabelsEnabled   bool
+		expectedWarningAnnotations []string
+		expectedInfoAnnotations    []string
+	}{
+		"info annotation when two samples are selected": {
+			data: `
+			series 1 2
+			`,
+			expr:                       "rate(series[1m1s])",
+			expectedWarningAnnotations: []string{},
+			expectedInfoAnnotations: []string{
+				`PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`,
+			},
+		},
+		"no info annotations when no samples": {
+			data: `
+			series
+			`,
+			expr:                       "rate(series[1m1s])",
+			expectedWarningAnnotations: []string{},
+			expectedInfoAnnotations:    []string{},
+		},
+		"no info annotations when selecting one sample": {
+			data: `
+			series 1 2
+			`,
+			expr:                       "rate(series[10s])",
+			expectedWarningAnnotations: []string{},
+			expectedInfoAnnotations:    []string{},
+		},
+		"no info annotations when no samples due to mixed data types": {
+			data: `
+			series{label="a"} 1 {{schema:1 sum:15 count:10 buckets:[1 2 3]}}
+			`,
+			expr: "rate(series[1m1s])",
+			expectedWarningAnnotations: []string{
+				`PromQL warning: encountered a mix of histograms and floats for metric name "series" (1:6)`,
+			},
+			expectedInfoAnnotations: []string{},
+		},
+		"no info annotations when selecting two native histograms": {
+			data: `
+			series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}}
+			`,
+			expr:                       "rate(series[1m1s])",
+			expectedWarningAnnotations: []string{},
+			expectedInfoAnnotations:    []string{},
+		},
+		"info annotation when rate() over series without _total
suffix": { + data: ` + series{label="a"} 1 2 3 + `, + expr: "rate(series[1m1s])", + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`, + }, + expectedWarningAnnotations: []string{}, + }, + "info annotation when increase() over series without _total suffix": { + data: ` + series{label="a"} 1 2 3 + `, + expr: "increase(series[1m1s])", + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:10)`, + }, + expectedWarningAnnotations: []string{}, + }, + "info annotation when rate() over series without __type__=counter label": { + data: ` + series{label="a"} 1 2 3 + `, + expr: "rate(series[1m1s])", + typeAndUnitLabelsEnabled: true, + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, __type__ label is not set to "counter", got "": "series" (1:6)`, + }, + expectedWarningAnnotations: []string{}, + }, + "info annotation when increase() over series without __type__=counter label": { + data: ` + series{label="a"} 1 2 3 + `, + expr: "increase(series[1m1s])", + typeAndUnitLabelsEnabled: true, + expectedInfoAnnotations: []string{ + `PromQL info: metric might not be a counter, __type__ label is not set to "counter", got "": "series" (1:10)`, + }, + expectedWarningAnnotations: []string{}, + }, + "no info annotation when rate() over series with __type__=counter label": { + data: ` + series{label="a", __type__="counter"} 1 2 3 + `, + expr: "rate(series[1m1s])", + typeAndUnitLabelsEnabled: true, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when increase() over series with __type__=counter label": { + data: ` + series{label="a", __type__="counter"} 1 2 3 + `, + expr: "increase(series[1m1s])", + typeAndUnitLabelsEnabled: true, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when rate() over series with _total suffix": { + data: ` + series_total{label="a"} 1 2 3 + `, + expr: "rate(series_total[1m1s])", + typeAndUnitLabelsEnabled: false, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when rate() over series with _sum suffix": { + data: ` + series_sum{label="a"} 1 2 3 + `, + expr: "rate(series_sum[1m1s])", + typeAndUnitLabelsEnabled: false, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when rate() over series with _count suffix": { + data: ` + series_count{label="a"} 1 2 3 + `, + expr: "rate(series_count[1m1s])", + typeAndUnitLabelsEnabled: false, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when rate() over series with _bucket suffix": { + data: ` + series_bucket{label="a"} 1 2 3 + `, + expr: "rate(series_bucket[1m1s])", + typeAndUnitLabelsEnabled: false, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when increase() over series with _total suffix": { + data: ` + series_total{label="a"} 1 2 3 + `, + expr: "increase(series_total[1m1s])", + expectedWarningAnnotations: []string{}, + typeAndUnitLabelsEnabled: false, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when increase() over series with _sum suffix": { + data: ` + series_sum{label="a"} 1 2 3 + `, + expr: "increase(series_sum[1m1s])", + typeAndUnitLabelsEnabled: 
false, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when increase() over series with _count suffix": { + data: ` + series_count{label="a"} 1 2 3 + `, + expr: "increase(series_count[1m1s])", + typeAndUnitLabelsEnabled: false, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + "no info annotation when increase() over series with _bucket suffix": { + data: ` + series_bucket{label="a"} 1 2 3 + `, + expr: "increase(series_bucket[1m1s])", + typeAndUnitLabelsEnabled: false, + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data)) + t.Cleanup(func() { _ = store.Close() }) + + engine := promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: promqltest.DefaultMaxSamplesPerQuery, + Timeout: 100 * time.Minute, + NoStepSubqueryIntervalFn: func(int64) int64 { return int64((1 * time.Minute / time.Millisecond / time.Nanosecond)) }, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: false, + LookbackDelta: 0, + EnableDelayedNameRemoval: true, + EnableTypeAndUnitLabels: testCase.typeAndUnitLabelsEnabled, + }) + query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute)) + require.NoError(t, err) + t.Cleanup(query.Close) + + res := query.Exec(context.Background()) + require.NoError(t, res.Err) + + warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0) + testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings) + testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos) + }) + } +} + +func TestHistogramQuantileAnnotations(t *testing.T) { + testCases := map[string]struct { + data string + expr string + expectedWarningAnnotations []string + expectedInfoAnnotations []string + }{ + "info annotation for nonmonotonic buckets": { + data: ` + nonmonotonic_bucket{le="0.1"} 0+2x10 + nonmonotonic_bucket{le="1"} 0+1x10 + nonmonotonic_bucket{le="10"} 0+5x10 + nonmonotonic_bucket{le="100"} 0+4x10 + nonmonotonic_bucket{le="1000"} 0+9x10 + nonmonotonic_bucket{le="+Inf"} 0+8x10 + `, + expr: "histogram_quantile(0.5, nonmonotonic_bucket)", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{ + `PromQL info: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name "nonmonotonic_bucket" (1:25)`, + }, + }, + "warning annotation for missing le label": { + data: ` + myHistogram{abe="0.1"} 0+2x10 + `, + expr: "histogram_quantile(0.5, myHistogram)", + expectedWarningAnnotations: []string{ + `PromQL warning: bucket label "le" is missing or has a malformed value of "" for metric name "myHistogram" (1:25)`, + }, + expectedInfoAnnotations: []string{}, + }, + "warning annotation for malformed le label": { + data: ` + myHistogram{le="Hello World"} 0+2x10 + `, + expr: "histogram_quantile(0.5, myHistogram)", + expectedWarningAnnotations: []string{ + `PromQL warning: bucket label "le" is missing or has a malformed value of "Hello World" for metric name "myHistogram" (1:25)`, + }, + expectedInfoAnnotations: []string{}, + }, + "warning annotation for mixed histograms": { + data: ` + mixedHistogram{le="0.1"} 0+2x10 + mixedHistogram{le="1"} 0+3x10 + 
mixedHistogram{} {{schema:0 count:10 sum:50 buckets:[1 2 3]}} + `, + expr: "histogram_quantile(0.5, mixedHistogram)", + expectedWarningAnnotations: []string{ + `PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram" (1:25)`, + }, + expectedInfoAnnotations: []string{}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data)) + t.Cleanup(func() { _ = store.Close() }) + + engine := newTestEngine(t) + query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute)) + require.NoError(t, err) + t.Cleanup(query.Close) + + res := query.Exec(context.Background()) + require.NoError(t, res.Err) + + warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0) + testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings) + testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos) + }) + } +} + +func TestHistogramRateWithFloatStaleness(t *testing.T) { + // Make a chunk with two normal histograms of the same value. + h1 := histogram.Histogram{ + Schema: 2, + Count: 10, + Sum: 100, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{100}, + } + + c1 := chunkenc.NewHistogramChunk() + app, err := c1.Appender() + require.NoError(t, err) + var ( + newc chunkenc.Chunk + recoded bool + ) + + newc, recoded, app, err = app.AppendHistogram(nil, 0, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 10, h1.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + // Make a chunk with a single float stale marker. + c2 := chunkenc.NewXORChunk() + app, err = c2.Appender() + require.NoError(t, err) + + app.Append(20, math.Float64frombits(value.StaleNaN)) + + // Make a chunk with two normal histograms that have zero value. + h2 := histogram.Histogram{ + Schema: 2, + } + + c3 := chunkenc.NewHistogramChunk() + app, err = c3.Appender() + require.NoError(t, err) + + newc, recoded, app, err = app.AppendHistogram(nil, 30, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + newc, recoded, _, err = app.AppendHistogram(nil, 40, h2.Copy(), false) + require.NoError(t, err) + require.False(t, recoded) + require.Nil(t, newc) + + querier := storage.MockQuerier{ + SelectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { + return &singleSeriesSet{ + series: mockSeries{chunks: []chunkenc.Chunk{c1, c2, c3}, labelSet: []string{"__name__", "foo"}}, + } + }, + } + + queriable := storage.MockQueryable{MockQuerier: &querier} + + engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) + + q, err := engine.NewInstantQuery(context.Background(), &queriable, nil, "rate(foo[40s])", timestamp.Time(45)) + require.NoError(t, err) + defer q.Close() + + res := q.Exec(context.Background()) + require.NoError(t, res.Err) + + vec, err := res.Vector() + require.NoError(t, err) + + // Single sample result. + require.Len(t, vec, 1) + // The result is a histogram. + require.NotNil(t, vec[0].H) + // The result should be zero as the histogram has not increased, so the rate is zero. 
+ require.Equal(t, 0.0, vec[0].H.Count) + require.Equal(t, 0.0, vec[0].H.Sum) +} + +type singleSeriesSet struct { + series storage.Series + consumed bool +} + +func (s *singleSeriesSet) Next() bool { c := s.consumed; s.consumed = true; return !c } +func (s singleSeriesSet) At() storage.Series { return s.series } +func (s singleSeriesSet) Err() error { return nil } +func (s singleSeriesSet) Warnings() annotations.Annotations { return nil } + +type mockSeries struct { + chunks []chunkenc.Chunk + labelSet []string +} + +func (s mockSeries) Labels() labels.Labels { + return labels.FromStrings(s.labelSet...) +} + +func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + iterables := []chunkenc.Iterator{} + for _, c := range s.chunks { + iterables = append(iterables, c.Iterator(nil)) + } + return storage.ChainSampleIteratorFromIterators(it, iterables) +} + +func TestEvaluationWithDelayedNameRemovalDisabled(t *testing.T) { + opts := promql.EngineOpts{ + Logger: nil, + Reg: nil, + EnableAtModifier: true, + MaxSamples: 10000, + Timeout: 10 * time.Second, + EnableDelayedNameRemoval: false, + } + engine := promqltest.NewTestEngineWithOpts(t, opts) + + promqltest.RunTest(t, ` +load 5m + metric_total{env="1"} 0 60 120 + another_metric{env="1"} 60 120 180 + +# Does not drop __name__ for vector selector +eval instant at 10m metric_total{env="1"} + metric_total{env="1"} 120 + +# Drops __name__ for unary operators +eval instant at 10m -metric_total + {env="1"} -120 + +# Drops __name__ for binary operators +eval instant at 10m metric_total + another_metric + {env="1"} 300 + +# Does not drop __name__ for binary comparison operators +eval instant at 10m metric_total <= another_metric + metric_total{env="1"} 120 + +# Drops __name__ for binary comparison operators with "bool" modifier +eval instant at 10m metric_total <= bool another_metric + {env="1"} 1 + +# Drops __name__ for vector-scalar operations +eval instant at 10m metric_total * 2 + {env="1"} 240 + +# Drops __name__ for instant-vector functions +eval instant at 10m clamp(metric_total, 0, 100) + {env="1"} 100 + +# Drops __name__ for round function +eval instant at 10m round(metric_total) + {env="1"} 120 + +# Drops __name__ for range-vector functions +eval instant at 10m rate(metric_total{env="1"}[10m]) + {env="1"} 0.2 + +# Does not drop __name__ for last_over_time function +eval instant at 10m last_over_time(metric_total{env="1"}[10m]) + metric_total{env="1"} 120 + +# Drops name for other _over_time functions +eval instant at 10m max_over_time(metric_total{env="1"}[10m]) + {env="1"} 120 +`, engine) +} + +// TestInconsistentHistogramCount is testing for +// https://github.com/prometheus/prometheus/issues/16681 which needs mixed +// integer and float histograms. The promql test framework only uses float +// histograms, so we cannot move this to promql tests. +func TestInconsistentHistogramCount(t *testing.T) { + dir := t.TempDir() + + opts := tsdb.DefaultHeadOptions() + opts.EnableNativeHistograms.Store(true) + opts.ChunkDirRoot = dir + // We use TSDB head only. By using full TSDB DB, and appending samples to it, closing it would cause unnecessary HEAD compaction, which slows down the test. + head, err := tsdb.NewHead(nil, nil, nil, nil, opts, nil) + require.NoError(t, err) + t.Cleanup(func() { + _ = head.Close() + }) + + app := head.Appender(context.Background()) + + l := labels.FromStrings("__name__", "series_1") + + mint := time.Now().Add(-time.Hour).UnixMilli() + maxt := mint + dT := int64(15 * 1000) // 15 seconds in milliseconds. 
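The histogram slice defined next deliberately ends with an empty histogram, which forces a counter reset between appends (see the comment on its last element). As a minimal, self-contained sketch of how such a reset is recognized, using DetectReset the same way the rate code in this diff does:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	prev := &histogram.FloatHistogram{
		Count:           4,
		Sum:             200,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{4},
	}
	curr := &histogram.FloatHistogram{} // Empty histogram: all counts are zero.
	// DetectReset reports whether curr looks like a counter reset relative
	// to prev, e.g. because the total count or a bucket count dropped.
	fmt.Println(curr.DetectReset(prev)) // true: Count dropped from 4 to 0.
}
```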
+ inHs := []*histogram.Histogram{ + { + Count: 2, + Sum: 100, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + }, + PositiveBuckets: []int64{2}, + }, + { + Count: 4, + Sum: 200, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + }, + PositiveBuckets: []int64{4}, + }, + {}, // Empty histogram to trigger counter reset. + } + + for i, h := range inHs { + maxt = mint + dT*int64(i) + _, err = app.AppendHistogram(0, l, maxt, h, nil) + require.NoError(t, err) + } + + require.NoError(t, app.Commit()) + queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + return tsdb.NewBlockQuerier(head, mint, maxt) + }) + + engine := promql.NewEngine(promql.EngineOpts{ + MaxSamples: 1000000, + Timeout: 10 * time.Second, + LookbackDelta: 5 * time.Minute, + }) + + var ( + query promql.Query + queryResult *promql.Result + v promql.Vector + ) + + query, err = engine.NewInstantQuery(context.Background(), queryable, nil, "(rate(series_1[1m]))", time.UnixMilli(maxt)) + require.NoError(t, err) + queryResult = query.Exec(context.Background()) + require.NoError(t, queryResult.Err) + require.NotNil(t, queryResult) + v, err = queryResult.Vector() + require.NoError(t, err) + require.Len(t, v, 1) + require.NotNil(t, v[0].H) + require.Greater(t, v[0].H.Count, float64(0)) + query.Close() + countFromHistogram := v[0].H.Count + + query, err = engine.NewInstantQuery(context.Background(), queryable, nil, "histogram_count((rate(series_1[1m])))", time.UnixMilli(maxt)) + require.NoError(t, err) + queryResult = query.Exec(context.Background()) + require.NoError(t, queryResult.Err) + require.NotNil(t, queryResult) + v, err = queryResult.Vector() + require.NoError(t, err) + require.Len(t, v, 1) + require.Nil(t, v[0].H) + query.Close() + countFromFunction := float64(v[0].F) + + require.Equal(t, countFromHistogram, countFromFunction, "histogram_count function should return the same count as the histogram itself") +} diff --git a/promql/functions.go b/promql/functions.go index dcc2cd7590..9af904c9e6 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -14,12 +14,12 @@ package promql import ( + "context" "errors" "fmt" "math" "slices" "sort" - "strconv" "strings" "time" @@ -31,6 +31,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/schema" "github.com/prometheus/prometheus/util/annotations" ) @@ -58,7 +59,7 @@ import ( type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, }}, nil @@ -97,9 +98,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod lastT = samples.Histograms[numSamplesMinusOne].T var newAnnos annotations.Annotations resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) + annos.Merge(newAnnos) if resultHistogram == nil { // The histograms are not compatible with each other. 
- return enh.Out, annos.Merge(newAnnos) + return enh.Out, annos } case len(samples.Floats) > 1: numSamplesMinusOne = len(samples.Floats) - 1 @@ -129,10 +131,18 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod sampledInterval := float64(lastT-firstT) / 1000 averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) - // If the first/last samples are close to the boundaries of the range, - // extrapolate the result. This is as we expect that another sample - // will exist given the spacing between samples we've seen thus far, - // with an allowance for noise. + // If samples are close enough to the (lower or upper) boundary of the + // range, we extrapolate the rate all the way to the boundary in + // question. "Close enough" is defined as "up to 10% more than the + // average duration between samples within the range", see + // extrapolationThreshold below. Essentially, we are assuming a more or + // less regular spacing between samples, and if we don't see a sample + // where we would expect one, we assume the series does not cover the + // whole range, but starts and/or ends within the range. We still + // extrapolate the rate in this case, but not all the way to the + // boundary, but only by half of the average duration between samples + // (which is our guess for where the series actually starts or ends). + extrapolationThreshold := averageDurationBetweenSamples * 1.1 extrapolateToInterval := sampledInterval @@ -177,23 +187,48 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod // not a histogram, and a warning wrapped in an annotation in that case. // Otherwise, it returns the calculated histogram and an empty annotation. func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { - prev := points[0].H - last := points[len(points)-1].H + var ( + prev = points[0].H + usingCustomBuckets = prev.UsesCustomBuckets() + last = points[len(points)-1].H + annos annotations.Annotations + ) + if last == nil { - return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) - } - minSchema := prev.Schema - if last.Schema < minSchema { - minSchema = last.Schema + return nil, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } - var annos annotations.Annotations + // We check for gauge type histograms in the loop below, but the loop + // below does not run on the first and last point, so check the first + // and last point now. + if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + } + + // Null out the 1st sample if there is a counter reset between the 1st + // and 2nd. In this case, we want to ignore any incompatibility in the + // bucket layout of the 1st sample because we do not need to look at it. + if isCounter && len(points) > 1 { + second := points[1].H + if second != nil && second.DetectReset(prev) { + prev = &histogram.FloatHistogram{} + prev.Schema = second.Schema + prev.CustomValues = second.CustomValues + usingCustomBuckets = second.UsesCustomBuckets() + } + } + + if last.UsesCustomBuckets() != usingCustomBuckets { + return nil, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } // First iteration to find out two things: // - What's the smallest relevant schema? 
// - Are all data points histograms? - // TODO(beorn7): Find a way to check that earlier, e.g. by handing in a - // []FloatPoint and a []HistogramPoint separately. + minSchema := prev.Schema + if last.Schema < minSchema { + minSchema = last.Schema + } for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { @@ -208,6 +243,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra if curr.Schema < minSchema { minSchema = curr.Schema } + if curr.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } } h := last.CopyToSchema(minSchema) @@ -241,7 +279,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } h.CounterResetHint = histogram.GaugeType - return h.Compact(0), nil + return h.Compact(0), annos } // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -261,46 +299,116 @@ func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, enh.Out, true) + return instantValue(vals, args, enh.Out, true) } // === idelta(node model.ValMatrix) (Vector, Annotations) === func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, enh.Out, false) + return instantValue(vals, args, enh.Out, false) } -func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func instantValue(vals []parser.Value, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { + var ( + samples = vals[0].(Matrix)[0] + metricName = samples.Metric.Get(labels.MetricName) + ss = make([]Sample, 0, 2) + annos annotations.Annotations + ) + // No sense in trying to compute a rate without at least two points. Drop // this Vector element. // TODO: add RangeTooShortWarning - if len(samples.Floats) < 2 { + if len(samples.Floats)+len(samples.Histograms) < 2 { return out, nil } - lastSample := samples.Floats[len(samples.Floats)-1] - previousSample := samples.Floats[len(samples.Floats)-2] - - var resultValue float64 - if isRate && lastSample.F < previousSample.F { - // Counter reset. - resultValue = lastSample.F - } else { - resultValue = lastSample.F - previousSample.F + // Add the last 2 float samples if they exist. + for i := max(0, len(samples.Floats)-2); i < len(samples.Floats); i++ { + ss = append(ss, Sample{ + F: samples.Floats[i].F, + T: samples.Floats[i].T, + }) } - sampledInterval := lastSample.T - previousSample.T + // Add the last 2 histogram samples into their correct position if they exist. + for i := max(0, len(samples.Histograms)-2); i < len(samples.Histograms); i++ { + s := Sample{ + H: samples.Histograms[i].H, + T: samples.Histograms[i].T, + } + switch { + case len(ss) == 0: + ss = append(ss, s) + case len(ss) == 1: + if s.T < ss[0].T { + ss = append([]Sample{s}, ss...) + } else { + ss = append(ss, s) + } + case s.T < ss[0].T: + // s is older than 1st, so discard it. + case s.T > ss[1].T: + // s is newest, so add it as 2nd and make the old 2nd the new 1st. + ss[0] = ss[1] + ss[1] = s + default: + // In all other cases, we just make s the new 1st. 
+ // This establishes a correct order, even in the (irregular) + // case of equal timestamps. + ss[0] = s + } + } + + resultSample := ss[1] + sampledInterval := ss[1].T - ss[0].T if sampledInterval == 0 { // Avoid dividing by 0. return out, nil } + switch { + case ss[1].H == nil && ss[0].H == nil: + if !isRate || !(ss[1].F < ss[0].F) { + // Gauge, or counter without reset, or counter with NaN value. + resultSample.F = ss[1].F - ss[0].F + } + + // In case of a counter reset, we leave resultSample at + // its current value, which is already ss[1]. + case ss[1].H != nil && ss[0].H != nil: + resultSample.H = ss[1].H.Copy() + // irate should only be applied to counters. + if isRate && (ss[1].H.CounterResetHint == histogram.GaugeType || ss[0].H.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, args.PositionRange())) + } + // idelta should only be applied to gauges. + if !isRate && (ss[1].H.CounterResetHint != histogram.GaugeType || ss[0].H.CounterResetHint != histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, args.PositionRange())) + } + if !isRate || !ss[1].H.DetectReset(ss[0].H) { + _, err := resultSample.H.Sub(ss[0].H) + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return out, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args.PositionRange())) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return out, annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args.PositionRange())) + } + } + resultSample.H.CounterResetHint = histogram.GaugeType + resultSample.H.Compact(0) + default: + // Mix of a float and a histogram. + return out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args.PositionRange())) + } if isRate { // Convert to per-second. - resultValue /= float64(sampledInterval) / 1000 + if resultSample.H == nil { + resultSample.F /= float64(sampledInterval) / 1000 + } else { + resultSample.H.Div(float64(sampledInterval) / 1000) + } } - return append(out, Sample{F: resultValue}), nil + return append(out, resultSample), annos } // Calculate the trend value at the given index i in raw data d. @@ -320,14 +428,17 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { return x + y } -// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data. -// Holt-Winter also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current -// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects -// how trends in historical data will affect the current data. A higher trend factor increases the influence. -// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +// Double exponential smoothing is similar to a weighted moving average, where +// historical data has exponentially less influence on the current data. It also +// accounts for trends in data. The smoothing factor (0 < sf < 1) affects how +// historical data will affect the current data. A lower smoothing factor +// increases the influence of historical data. 
The trend factor (0 < tf < 1)
+// affects how trends in historical data will affect the current data. A higher
+// trend factor increases the influence of trends. Algorithm taken from
+// https://en.wikipedia.org/wiki/Exponential_smoothing .
+func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	samples := vals[0].(Matrix)[0]
-
+	metricName := samples.Metric.Get(labels.MetricName)
 	// The smoothing factor argument.
 	sf := vals[1].(Vector)[0].F
@@ -346,6 +457,10 @@
 	// Can't do the smoothing operation with less than two points.
 	if l < 2 {
+		// Annotate mix of float and histogram.
+		if l == 1 && len(samples.Histograms) > 0 {
+			return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+		}
 		return enh.Out, nil
 	}
 
@@ -366,38 +481,46 @@
 		s0, s1 = s1, x+y
 	}
-
+	if len(samples.Histograms) > 0 {
+		return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+	}
 	return append(enh.Out, Sample{F: s1}), nil
 }
 
+// filterFloats filters out histogram samples from the vector in-place.
+func filterFloats(v Vector) Vector {
+	floats := v[:0]
+	for _, s := range v {
+		if s.H == nil {
+			floats = append(floats, s)
+		}
+	}
+	return floats
+}
+
 // === sort(node parser.ValueTypeVector) (Vector, Annotations) ===
-func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcSort(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
 	// NaN should sort to the bottom, so take descending sort with NaN first and
 	// reverse it.
-	byValueSorter := vectorByReverseValueHeap(vals[0].(Vector))
+	byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector)))
 	sort.Sort(sort.Reverse(byValueSorter))
 	return Vector(byValueSorter), nil
 }
 
 // === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) ===
-func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcSortDesc(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
 	// NaN should sort to the bottom, so take ascending sort with NaN first and
 	// reverse it.
-	byValueSorter := vectorByValueHeap(vals[0].(Vector))
+	byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector)))
 	sort.Sort(sort.Reverse(byValueSorter))
 	return Vector(byValueSorter), nil
 }
 
 // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
-func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	// In case the labels are the same, NaN should sort to the bottom, so take
-	// ascending sort with NaN first and reverse it.
- var anno annotations.Annotations - vals[0], anno = funcSort(vals, args, enh) - labels := stringSliceFromArgs(args[1:]) +func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { + lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label - for _, label := range labels { + for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -412,22 +535,18 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode return +1 } - return 0 + // If all labels provided as arguments were equal, sort by the full label set. This ensures a consistent ordering. + return labels.Compare(a.Metric, b.Metric) }) - return vals[0].(Vector), anno + return vals[0].(Vector), nil } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === -func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - // In case the labels are the same, NaN should sort to the bottom, so take - // ascending sort with NaN first and reverse it. - var anno annotations.Annotations - vals[0], anno = funcSortDesc(vals, args, enh) - labels := stringSliceFromArgs(args[1:]) +func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { + lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label - for _, label := range labels { + for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) @@ -442,58 +561,58 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval return -1 } - return 0 + // If all labels provided as arguments were equal, sort by the full label set. This ensures a consistent ordering. + return -labels.Compare(a.Metric, b.Metric) }) - return vals[0].(Vector), anno + return vals[0].(Vector), nil } -// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F - max := vals[2].(Vector)[0].F - if max < min { +func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if maxVal < minVal { return enh.Out, nil } for _, el := range vec { + if el.H != nil { + // Process only float samples. 
+ continue + } + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel) + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Max(min, math.Min(max, el.F)), + Metric: el.Metric, + F: math.Max(minVal, math.Min(maxVal, el.F)), + DropName: true, }) } return enh.Out, nil } +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === +func funcClamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F + return clamp(vec, minVal, maxVal, enh) +} + // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === -func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClampMax(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].F - for _, el := range vec { - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Min(max, el.F), - }) - } - return enh.Out, nil + maxVal := vals[1].(Vector)[0].F + return clamp(vec, math.Inf(-1), maxVal, enh) } // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === -func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F - for _, el := range vec { - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Max(min, el.F), - }) - } - return enh.Out, nil + minVal := vals[1].(Vector)[0].F + return clamp(vec, minVal, math.Inf(+1), enh) } // === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) // round returns a number rounded to toNearest. // Ties are solved by rounding up. toNearest := float64(1) @@ -502,24 +621,34 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // Invert as it seems to cause fewer floating point accuracy issues. toNearestInverse := 1.0 / toNearest - - for _, el := range vec { - f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: f, - }) - } - return enh.Out, nil + return simpleFloatFunc(vals, enh, func(f float64) float64 { + return math.Floor(f*toNearestInverse+0.5) / toNearestInverse + }), nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - v := vals[0].(Vector) - if len(v) != 1 { +func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + var ( + v = vals[0].(Vector) + value float64 + found bool + ) + + for _, s := range v { + if s.H == nil { + if found { + // More than one float found, return NaN. + return append(enh.Out, Sample{F: math.NaN()}), nil + } + found = true + value = s.F + } + } + // Return the single float if found, otherwise return NaN. 
+ if !found { return append(enh.Out, Sample{F: math.NaN()}), nil } - return append(enh.Out, Sample{F: v[0].F}), nil + return append(enh.Out, Sample{F: value}), nil } func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { @@ -542,15 +671,36 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode metricName := firstSeries.Metric.Get(labels.MetricName) return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } + // For the average calculation of histograms, we use incremental mean + // calculation without the help of Kahan summation (but this should + // change, see https://github.com/prometheus/prometheus/issues/14105 ). + // For floats, we improve the accuracy with the help of Kahan summation. + // For a while, we assumed that incremental mean calculation combined + // with Kahan summation (see + // https://stackoverflow.com/questions/61665473/is-it-beneficial-for-precision-to-calculate-the-incremental-mean-average + // for inspiration) is generally the preferred solution. However, it + // then turned out that direct mean calculation (still in combination + // with Kahan summation) is often more accurate. See discussion in + // https://github.com/prometheus/prometheus/issues/16714 . The problem + // with the direct mean calculation is that it can overflow float64 for + // inputs on which the incremental mean calculation works just fine. Our + // current approach is therefore to use direct mean calculation as long + // as we do not overflow (or underflow) the running sum. Once the latter + // would happen, we switch to incremental mean calculation. This seems + // to work reasonably well, but note that a deeper understanding would + // be needed to find out if maybe an earlier switch to incremental mean + // calculation would be better in terms of accuracy. Also, we could + // apply a number of additional means to improve the accuracy, like + // processing the values in a particular order. For now, we decided that + // the current implementation is accurate enough for practical purposes. if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) { - count := 1 mean := s.Histograms[0].H.Copy() - for _, h := range s.Histograms[1:] { - count++ - left := h.H.Copy().Div(float64(count)) - right := mean.Copy().Div(float64(count)) + for i, h := range s.Histograms[1:] { + count := float64(i + 2) + left := h.H.Copy().Div(count) + right := mean.Copy().Div(count) toAdd, err := left.Sub(right) if err != nil { return mean, err @@ -573,45 +723,47 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return vec, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - var mean, count, c float64 - for _, f := range s.Floats { - count++ - if math.IsInf(mean, 0) { - if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { - // The `mean` and `f.F` values are `Inf` of the same sign. They - // can't be subtracted, but the value of `mean` is correct - // already. - continue - } - if !math.IsInf(f.F, 0) && !math.IsNaN(f.F) { - // At this stage, the mean is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. 
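The added code that follows implements the strategy described in the new comment above: a direct Kahan-compensated mean while the running sum stays finite, with a fallback to an incremental mean once it would overflow. A self-contained sketch of the same idea, with illustrative names (`kahanSumInc` mirrors the helper this file already uses; `mean` is a hypothetical stand-in for the float branch of `funcAvgOverTime`, not the diff's exact code):

```go
package sketch

import "math"

// kahanSumInc adds inc to sum while tracking a running error term c.
func kahanSumInc(inc, sum, c float64) (float64, float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0 // Compensation is meaningless once the sum is infinite.
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

// mean computes a direct Kahan-compensated mean, switching to an incremental
// mean only when the running sum would overflow float64. Assumes len(values) > 0.
func mean(values []float64) float64 {
	sum, c := values[0], 0.0 // Pre-set to the 1st value, loop from the 2nd.
	var m, mc float64
	incremental := false
	n := 1.0
	for _, v := range values[1:] {
		n++
		if !incremental {
			newSum, newC := kahanSumInc(v, sum, c)
			if !math.IsInf(newSum, 0) {
				sum, c = newSum, newC
				continue
			}
			// The running sum would overflow float64: switch over.
			incremental = true
			m, mc = sum/(n-1), c/(n-1)
		}
		q := (n - 1) / n
		m, mc = kahanSumInc(v/n, q*m, q*mc)
	}
	if incremental {
		return m + mc
	}
	return sum/n + c/n
}
```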
+ var ( + // Pre-set the 1st sample to start the loop with the 2nd. + sum, count = s.Floats[0].F, 1. + mean, kahanC float64 + incrementalMean bool + ) + for i, f := range s.Floats[1:] { + count = float64(i + 2) + if !incrementalMean { + newSum, newC := kahanSumInc(f.F, sum, kahanC) + // Perform regular mean calculation as long as + // the sum doesn't overflow. + if !math.IsInf(newSum, 0) { + sum, kahanC = newSum, newC continue } + // Handle overflow by reverting to incremental + // calculation of the mean value. + incrementalMean = true + mean = sum / (count - 1) + kahanC /= (count - 1) } - mean, c = kahanSumInc(f.F/count-mean/count, mean, c) + q := (count - 1) / count + mean, kahanC = kahanSumInc(f.F/count, q*mean, q*kahanC) } - - if math.IsInf(mean, 0) { - return mean + if incrementalMean { + return mean + kahanC } - return mean + c + return sum/count + kahanC/count }), nil } // === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCountOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return aggrOverTime(vals, enh, func(s Series) float64 { return float64(len(s.Floats) + len(s.Histograms)) }), nil } // === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { el := vals[0].(Matrix)[0] var f FPoint @@ -624,7 +776,7 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod h = el.Histograms[len(el.Histograms)-1] } - if h.H == nil || h.T < f.T { + if h.H == nil || (len(el.Floats) > 0 && h.T < f.T) { return append(enh.Out, Sample{ Metric: el.Metric, F: f.F, @@ -638,9 +790,15 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod // === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { values := make(vectorByValueHeap, 0, len(s.Floats)) for _, f := range s.Floats { @@ -652,47 +810,82 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode values = append(values, Sample{F: math.Abs(f.F - median)}) } return quantile(0.5, values) + }), annos +} + +// === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === +func funcTsOfLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + el := vals[0].(Matrix)[0] + + var tf int64 + if len(el.Floats) > 0 { + tf = el.Floats[len(el.Floats)-1].T + } + + var th int64 + if len(el.Histograms) > 0 { + th = el.Histograms[len(el.Histograms)-1].T + } + + return append(enh.Out, Sample{ + Metric: el.Metric, + F: float64(max(tf, th)) / 1000, }), nil } +// === ts_of_max_over_time(Matrix 
+// === ts_of_max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcTsOfMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
+		return (cur >= maxVal) || math.IsNaN(maxVal)
+	}, true)
+}
+
+// === ts_of_min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcTsOfMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
+		return (cur <= maxVal) || math.IsNaN(maxVal)
+	}, true)
+}
+
+// compareOverTime is a helper used by funcMaxOverTime, funcMinOverTime,
+// funcTsOfMaxOverTime and funcTsOfMinOverTime.
+func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool, returnTimestamp bool) (Vector, annotations.Annotations) {
+	samples := vals[0].(Matrix)[0]
+	var annos annotations.Annotations
+	if len(samples.Floats) == 0 {
+		return enh.Out, nil
+	}
+	if len(samples.Histograms) > 0 {
+		metricName := samples.Metric.Get(labels.MetricName)
+		annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+	}
+	return aggrOverTime(vals, enh, func(s Series) float64 {
+		maxVal := s.Floats[0].F
+		tsOfMax := s.Floats[0].T
+		for _, f := range s.Floats {
+			if compareFn(f.F, maxVal) {
+				maxVal = f.F
+				tsOfMax = f.T
+			}
+		}
+		if returnTimestamp {
+			return float64(tsOfMax) / 1000
+		}
+		return maxVal
+	}), annos
+}
+
 // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
 func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	if len(vals[0].(Matrix)[0].Floats) == 0 {
-		// TODO(beorn7): The passed values only contain
-		// histograms. max_over_time ignores histograms for now. If
-		// there are only histograms, we have to return without adding
-		// anything to enh.Out.
-		return enh.Out, nil
-	}
-	return aggrOverTime(vals, enh, func(s Series) float64 {
-		max := s.Floats[0].F
-		for _, f := range s.Floats {
-			if f.F > max || math.IsNaN(max) {
-				max = f.F
-			}
-		}
-		return max
-	}), nil
+	return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
+		return (cur > maxVal) || math.IsNaN(maxVal)
+	}, false)
 }
 
 // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
 func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	if len(vals[0].(Matrix)[0].Floats) == 0 {
-		// TODO(beorn7): The passed values only contain
-		// histograms. min_over_time ignores histograms for now. If
-		// there are only histograms, we have to return without adding
-		// anything to enh.Out.
-		return enh.Out, nil
-	}
-	return aggrOverTime(vals, enh, func(s Series) float64 {
-		min := s.Floats[0].F
-		for _, f := range s.Floats {
-			if f.F < min || math.IsNaN(min) {
-				min = f.F
-			}
-		}
-		return min
-	}), nil
+	return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
+		return (cur < maxVal) || math.IsNaN(maxVal)
+	}, false)
 }
 
 // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@@ -741,10 +934,6 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva
 	q := vals[0].(Vector)[0].F
 	el := vals[1].(Matrix)[0]
 	if len(el.Floats) == 0 {
-		// TODO(beorn7): The passed values only contain
-		// histograms. quantile_over_time ignores histograms for now. If
-		// there are only histograms, we have to return without adding
-		// anything to enh.Out.
 		return enh.Out, nil
 	}
 
@@ -752,7 +941,10 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva
 	if math.IsNaN(q) || q < 0 || q > 1 {
 		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
 	}
-
+	if len(el.Histograms) > 0 {
+		metricName := el.Metric.Get(labels.MetricName)
+		annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+	}
 	values := make(vectorByValueHeap, 0, len(el.Floats))
 	for _, f := range el.Floats {
 		values = append(values, Sample{F: f.F})
@@ -760,15 +952,16 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva
 	return append(enh.Out, Sample{F: quantile(q, values)}), annos
 }
 
-// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	if len(vals[0].(Matrix)[0].Floats) == 0 {
-		// TODO(beorn7): The passed values only contain
-		// histograms. stddev_over_time ignores histograms for now. If
-		// there are only histograms, we have to return without adding
-		// anything to enh.Out.
+func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
+	samples := vals[0].(Matrix)[0]
+	var annos annotations.Annotations
+	if len(samples.Floats) == 0 {
 		return enh.Out, nil
 	}
+	if len(samples.Histograms) > 0 {
+		metricName := samples.Metric.Get(labels.MetricName)
+		annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var count float64
 		var mean, cMean float64
 		var aux, cAux float64
 		for _, f := range s.Floats {
 			count++
 			delta := f.F - (mean + cMean)
@@ -779,31 +972,22 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN
 			mean, cMean = kahanSumInc(delta/count, mean, cMean)
 			aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
 		}
-		return math.Sqrt((aux + cAux) / count)
-	}), nil
+		variance := (aux + cAux) / count
+		if varianceToResult == nil {
+			return variance
+		}
+		return varianceToResult(variance)
+	}), annos
+}
+
+// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return varianceOverTime(vals, args, enh, math.Sqrt)
 }
 
 // === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
 func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	if len(vals[0].(Matrix)[0].Floats) == 0 {
-		// TODO(beorn7): The passed values only contain
-		// histograms. stdvar_over_time ignores histograms for now. If
-		// there are only histograms, we have to return without adding
-		// anything to enh.Out.
-		return enh.Out, nil
-	}
-	return aggrOverTime(vals, enh, func(s Series) float64 {
-		var count float64
-		var mean, cMean float64
-		var aux, cAux float64
-		for _, f := range s.Floats {
-			count++
-			delta := f.F - (mean + cMean)
-			mean, cMean = kahanSumInc(delta/count, mean, cMean)
-			aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
-		}
-		return (aux + cAux) / count
-	}), nil
+	return varianceOverTime(vals, args, enh, nil)
 }
 
 // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) ===
@@ -823,23 +1007,27 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
 // This function will return 1 if the matrix has at least one element.
 // Due to engine optimization, this function is only called when this condition is true.
 // Then, the engine post-processes the results to get the expected output.
-func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcAbsentOverTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return append(enh.Out, Sample{F: 1}), nil
 }
 
 // === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return aggrOverTime(vals, enh, func(s Series) float64 {
+func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return aggrOverTime(vals, enh, func(_ Series) float64 {
 		return 1
 	}), nil
 }
 
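varianceOverTime above folds stddev_over_time and stdvar_over_time into one single-pass, Welford-style recurrence, with kahanSumInc compensating both the mean and the auxiliary sum; the two callers differ only in the final transform (math.Sqrt vs. nil). A stripped-down sketch of the recurrence, without the Kahan compensation (illustrative code, not the Prometheus API):

package main

import (
	"fmt"
	"math"
)

// varianceStreaming computes the population variance in one pass using
// Welford's recurrence: mean_k = mean_{k-1} + delta/k, plus an auxiliary
// sum of delta*(x - mean_k) that totals n*variance.
func varianceStreaming(xs []float64) float64 {
	var count, mean, aux float64
	for _, x := range xs {
		count++
		delta := x - mean
		mean += delta / count
		aux += delta * (x - mean)
	}
	return aux / count
}

func main() {
	xs := []float64{2, 4, 4, 4, 5, 5, 7, 9}
	v := varianceStreaming(xs)
	fmt.Println(v, math.Sqrt(v)) // 4 and 2 for this classic data set
}
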
-func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
+func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
 	for _, el := range vals[0].(Vector) {
 		if el.H == nil { // Process only float samples.
+			if !enh.enableDelayedNameRemoval {
+				el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
+			}
 			enh.Out = append(enh.Out, Sample{
-				Metric: el.Metric.DropMetricName(),
-				F:      f(el.F),
+				Metric:   el.Metric,
+				F:        f(el.F),
+				DropName: true,
 			})
 		}
 	}
@@ -847,127 +1035,127 @@
 }
 
 // === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Abs), nil
+func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Abs), nil
 }
 
 // === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Ceil), nil
+func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Ceil), nil
 }
 
 // === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Floor), nil
+func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Floor), nil
 }
 
 // === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Exp), nil
+func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Exp), nil
 }
 
 // === sqrt(Vector VectorNode) (Vector, Annotations) ===
-func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Sqrt), nil
+func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Sqrt), nil
 }
 
 // === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Log), nil
+func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Log), nil
 }
 
 // === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Log2), nil
+func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Log2), nil
 }
 
 // === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Log10), nil
+func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Log10), nil
 }
 
 // === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Sin), nil
+func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Sin), nil
 }
 
 // === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Cos), nil
+func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Cos), nil
 }
 
 // === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Tan), nil
+func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Tan), nil
 }
 
 // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Asin), nil
+func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Asin), nil
 }
 
 // === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Acos), nil
+func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Acos), nil
 }
 
 // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Atan), nil
+func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Atan), nil
 }
 
 // === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Sinh), nil
+func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Sinh), nil
 }
 
 // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Cosh), nil
+func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Cosh), nil
 }
 
 // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Tanh), nil
+func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Tanh), nil
 }
 
 // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Asinh), nil
+func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Asinh), nil
 }
 
 // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Acosh), nil
+func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, math.Acosh), nil
 }
 
 // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, math.Atanh), nil
+func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
	return simpleFloatFunc(vals, enh, math.Atanh), nil
 }
 
 // === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, func(v float64) float64 {
+func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, func(v float64) float64 {
 		return v * math.Pi / 180
 	}), nil
 }
 
 // === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, func(v float64) float64 {
+func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, func(v float64) float64 {
 		return v * 180 / math.Pi
 	}), nil
 }
 
 // === pi() Scalar ===
-func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return Vector{Sample{F: math.Pi}}, nil
 }
 
 // === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	return simpleFunc(vals, enh, func(v float64) float64 {
+func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleFloatFunc(vals, enh, func(v float64) float64 {
 		switch {
 		case v < 0:
 			return -1
@@ -980,12 +1168,16 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
 }
 
 // === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	vec := vals[0].(Vector)
 	for _, el := range vec {
+		if !enh.enableDelayedNameRemoval {
+			el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      float64(el.T) / 1000,
+			Metric:   el.Metric,
+			F:        float64(el.T) / 1000,
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1055,10 +1247,15 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
 // === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
 func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	samples := vals[0].(Matrix)[0]
+	metricName := samples.Metric.Get(labels.MetricName)
 
-	// No sense in trying to compute a derivative without at least two points.
+	// No sense in trying to compute a derivative without at least two float points.
 	// Drop this Vector element.
 	if len(samples.Floats) < 2 {
+		// Annotate mix of float and histogram.
+		if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
+			return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+		}
 		return enh.Out, nil
 	}
 
@@ -1066,6 +1263,9 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
 	// to avoid floating point accuracy issues, see
 	// https://github.com/prometheus/prometheus/issues/2674
 	slope, _ := linearRegression(samples.Floats, samples.Floats[0].T)
+	if len(samples.Histograms) > 0 {
+		return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+	}
 	return append(enh.Out, Sample{F: slope}), nil
 }
 
@@ -1073,143 +1273,107 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
 func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	samples := vals[0].(Matrix)[0]
 	duration := vals[1].(Vector)[0].F
-	// No sense in trying to predict anything without at least two points.
+	metricName := samples.Metric.Get(labels.MetricName)
+
+	// No sense in trying to predict anything without at least two float points.
 	// Drop this Vector element.
 	if len(samples.Floats) < 2 {
+		// Annotate mix of float and histogram.
+		if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
+			return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+		}
 		return enh.Out, nil
 	}
 
-	slope, intercept := linearRegression(samples.Floats, enh.Ts)
+	slope, intercept := linearRegression(samples.Floats, enh.Ts)
+	if len(samples.Histograms) > 0 {
+		return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+	}
 	return append(enh.Out, Sample{F: slope*duration + intercept}), nil
 }
 
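Both deriv and predict_linear delegate to linearRegression, an ordinary least-squares fit over (timestamp, value) pairs that takes timestamps relative to a reference point to limit floating-point error. A compact sketch of that fit (hypothetical names; the real helper additionally uses Kahan-compensated sums and short-circuits constant series):

package main

import "fmt"

type point struct {
	t int64   // timestamp in milliseconds
	v float64 // sample value
}

// linReg fits v = slope*x + intercept by ordinary least squares, with
// x taken relative to refT (in seconds) to reduce rounding error.
func linReg(pts []point, refT int64) (slope, intercept float64) {
	var n, sumX, sumY, sumXY, sumX2 float64
	for _, p := range pts {
		x := float64(p.t-refT) / 1e3
		n++
		sumX += x
		sumY += p.v
		sumXY += x * p.v
		sumX2 += x * x
	}
	covXY := sumXY - sumX*sumY/n
	varX := sumX2 - sumX*sumX/n
	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}

func main() {
	// A perfectly linear series: v = 2*t + 1 (t in seconds).
	pts := []point{{0, 1}, {1000, 3}, {2000, 5}, {3000, 7}}
	s, i := linReg(pts, 0)
	fmt.Println(s, i) // 2 1
}
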
-// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	inVec := vals[0].(Vector)
-
-	for _, sample := range inVec {
-		// Skip non-histogram samples.
-		if sample.H == nil {
-			continue
+func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector {
+	for _, el := range vals[0].(Vector) {
+		if el.H != nil { // Process only histogram samples.
+			if !enh.enableDelayedNameRemoval {
+				el.Metric = el.Metric.DropMetricName()
+			}
+			enh.Out = append(enh.Out, Sample{
+				Metric:   el.Metric,
+				F:        f(el.H),
+				DropName: true,
+			})
 		}
-		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      sample.H.Count,
-		})
 	}
-	return enh.Out, nil
+	return enh.Out
+}
+
+// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
+		return h.Count
+	}), nil
 }
 
 // === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	inVec := vals[0].(Vector)
-
-	for _, sample := range inVec {
-		// Skip non-histogram samples.
-		if sample.H == nil {
-			continue
-		}
-		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      sample.H.Sum,
-		})
-	}
-	return enh.Out, nil
+func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
+		return h.Sum
+	}), nil
 }
 
 // === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	inVec := vals[0].(Vector)
+func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
+		return h.Sum / h.Count
+	}), nil
+}
 
-	for _, sample := range inVec {
-		// Skip non-histogram samples.
-		if sample.H == nil {
-			continue
+func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
+	return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
+		mean := h.Sum / h.Count
+		var variance, cVariance float64
+		it := h.AllBucketIterator()
+		for it.Next() {
+			bucket := it.At()
+			if bucket.Count == 0 {
+				continue
+			}
+			var val float64
+			switch {
+			case h.UsesCustomBuckets():
+				// Use arithmetic mean in case of custom buckets.
+				val = (bucket.Upper + bucket.Lower) / 2.0
+			case bucket.Lower <= 0 && bucket.Upper >= 0:
+				// Use zero (effectively the arithmetic mean) in the zero bucket of a standard exponential histogram.
+				val = 0
+			default:
+				// Use geometric mean in case of standard exponential buckets.
+				val = math.Sqrt(bucket.Upper * bucket.Lower)
+				if bucket.Upper < 0 {
+					val = -val
+				}
+			}
+			delta := val - mean
+			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
 		}
-		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      sample.H.Sum / sample.H.Count,
-		})
-	}
-	return enh.Out, nil
+		variance += cVariance
+		variance /= h.Count
+		if varianceToResult != nil {
+			variance = varianceToResult(variance)
+		}
+		return variance
+	}), nil
 }
 
 // === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	inVec := vals[0].(Vector)
-
-	for _, sample := range inVec {
-		// Skip non-histogram samples.
-		if sample.H == nil {
-			continue
-		}
-		mean := sample.H.Sum / sample.H.Count
-		var variance, cVariance float64
-		it := sample.H.AllBucketIterator()
-		for it.Next() {
-			bucket := it.At()
-			if bucket.Count == 0 {
-				continue
-			}
-			var val float64
-			if bucket.Lower <= 0 && 0 <= bucket.Upper {
-				val = 0
-			} else {
-				val = math.Sqrt(bucket.Upper * bucket.Lower)
-				if bucket.Upper < 0 {
-					val = -val
-				}
-			}
-			delta := val - mean
-			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
-		}
-		variance += cVariance
-		variance /= sample.H.Count
-		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      math.Sqrt(variance),
-		})
-	}
-	return enh.Out, nil
+func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return histogramVariance(vals, enh, math.Sqrt)
 }
 
 // === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
-func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	inVec := vals[0].(Vector)
-
-	for _, sample := range inVec {
-		// Skip non-histogram samples.
-		if sample.H == nil {
-			continue
-		}
-		mean := sample.H.Sum / sample.H.Count
-		var variance, cVariance float64
-		it := sample.H.AllBucketIterator()
-		for it.Next() {
-			bucket := it.At()
-			if bucket.Count == 0 {
-				continue
-			}
-			var val float64
-			if bucket.Lower <= 0 && 0 <= bucket.Upper {
-				val = 0
-			} else {
-				val = math.Sqrt(bucket.Upper * bucket.Lower)
-				if bucket.Upper < 0 {
-					val = -val
-				}
-			}
-			delta := val - mean
-			variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
-		}
-		variance += cVariance
-		variance /= sample.H.Count
-		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      variance,
-		})
-	}
-	return enh.Out, nil
+func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return histogramVariance(vals, enh, nil)
 }
 
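histogramVariance has to pick a representative observation value per bucket: the arithmetic midpoint for custom buckets, zero for the zero bucket, and the geometric mean for standard exponential buckets, whose boundaries grow multiplicatively. A small sketch of just that selection logic (simplified bucket type, not the Prometheus API):

package main

import (
	"fmt"
	"math"
)

// bucket is a simplified stand-in for a float-histogram bucket.
type bucket struct {
	lower, upper float64
}

// bucketValue mirrors the three cases above: custom buckets use the
// arithmetic midpoint, the zero bucket uses zero, and exponential
// buckets use the (sign-corrected) geometric mean of their bounds.
func bucketValue(b bucket, customBuckets bool) float64 {
	switch {
	case customBuckets:
		return (b.upper + b.lower) / 2.0
	case b.lower <= 0 && b.upper >= 0:
		return 0
	default:
		v := math.Sqrt(b.upper * b.lower)
		if b.upper < 0 {
			v = -v
		}
		return v
	}
}

func main() {
	// Exponential buckets (2,4] and (4,8]: geometric means ~2.83 and ~5.66.
	fmt.Println(bucketValue(bucket{2, 4}, false))
	fmt.Println(bucketValue(bucket{4, 8}, false))
	// The same bounds treated as custom buckets: arithmetic midpoint 3.
	fmt.Println(bucketValue(bucket{2, 4}, true))
}
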
 // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
@@ -1218,17 +1382,43 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
 	upper := vals[1].(Vector)[0].F
 	inVec := vals[2].(Vector)
 
-	for _, sample := range inVec {
-		// Skip non-histogram samples.
+	annos := enh.resetHistograms(inVec, args[2])
+
+	// Deal with the native histograms.
+	for _, sample := range enh.nativeHistogramSamples {
 		if sample.H == nil {
+			// Native histogram conflicts with classic histogram at the same timestamp, ignore.
 			continue
 		}
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
+		}
+		hf, hfAnnos := HistogramFraction(lower, upper, sample.H, sample.Metric.Get(model.MetricNameLabel), args[0].PositionRange())
+		annos.Merge(hfAnnos)
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      histogramFraction(lower, upper, sample.H),
+			Metric:   sample.Metric,
+			F:        hf,
+			DropName: true,
 		})
 	}
-	return enh.Out, nil
+
+	// Deal with classic histograms that have already been filtered for conflicting native histograms.
+	for _, mb := range enh.signatureToMetricWithBuckets {
+		if len(mb.buckets) == 0 {
+			continue
+		}
+		if !enh.enableDelayedNameRemoval {
+			mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
+		}
+
+		enh.Out = append(enh.Out, Sample{
+			Metric:   mb.metric,
+			F:        BucketFraction(lower, upper, mb.buckets),
+			DropName: true,
+		})
+	}
+
+	return enh.Out, annos
 }
 
 // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
@@ -1240,75 +1430,43 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 	if math.IsNaN(q) || q < 0 || q > 1 {
 		annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
 	}
+	annos.Merge(enh.resetHistograms(inVec, args[1]))
 
-	if enh.signatureToMetricWithBuckets == nil {
-		enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}
-	} else {
-		for _, v := range enh.signatureToMetricWithBuckets {
-			v.buckets = v.buckets[:0]
-		}
-	}
-
-	var histogramSamples []Sample
-
-	for _, sample := range inVec {
-		// We are only looking for classic buckets here. Remember
-		// the histograms for later treatment.
-		if sample.H != nil {
-			histogramSamples = append(histogramSamples, sample)
+	// Deal with the native histograms.
+	for _, sample := range enh.nativeHistogramSamples {
+		if sample.H == nil {
+			// Native histogram conflicts with classic histogram at the same timestamp, ignore.
 			continue
 		}
-
-		upperBound, err := strconv.ParseFloat(
-			sample.Metric.Get(model.BucketLabel), 64,
-		)
-		if err != nil {
-			annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange()))
-			continue
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
 		}
-		enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
-		mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]
-		if !ok {
-			sample.Metric = labels.NewBuilder(sample.Metric).
-				Del(excludedLabels...).
-				Labels()
-
-			mb = &metricWithBuckets{sample.Metric, nil}
-			enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
-		}
-		mb.buckets = append(mb.buckets, bucket{upperBound, sample.F})
-	}
-
-	// Now deal with the histograms.
-	for _, sample := range histogramSamples {
-		// We have to reconstruct the exact same signature as above for
-		// a classic histogram, just ignoring any le label.
-		enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
-		if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {
-			// At this data point, we have classic histogram
-			// buckets and a native histogram with the same name and
-			// labels. Do not evaluate anything.
-			annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange()))
-			delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
-			continue
-		}
-
+		hq, hqAnnos := HistogramQuantile(q, sample.H, sample.Metric.Get(model.MetricNameLabel), args[0].PositionRange())
+		annos.Merge(hqAnnos)
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      histogramQuantile(q, sample.H),
+			Metric:   sample.Metric,
+			F:        hq,
+			DropName: true,
 		})
 	}
 
+	// Deal with classic histograms that have already been filtered for conflicting native histograms.
 	for _, mb := range enh.signatureToMetricWithBuckets {
 		if len(mb.buckets) > 0 {
-			res, forcedMonotonicity, _ := bucketQuantile(q, mb.buckets)
-			enh.Out = append(enh.Out, Sample{
-				Metric: mb.metric,
-				F:      res,
-			})
+			res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets)
 			if forcedMonotonicity {
 				annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
 			}
+
+			if !enh.enableDelayedNameRemoval {
+				mb.metric = mb.metric.DropReserved(schema.IsMetadataLabel)
+			}
+
+			enh.Out = append(enh.Out, Sample{
+				Metric:   mb.metric,
+				F:        res,
+				DropName: true,
+			})
 		}
 	}
 
@@ -1316,60 +1474,97 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 }
 
 // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	floats := vals[0].(Matrix)[0].Floats
 	histograms := vals[0].(Matrix)[0].Histograms
 	resets := 0
-
-	if len(floats) > 1 {
-		prev := floats[0].F
-		for _, sample := range floats[1:] {
-			current := sample.F
-			if current < prev {
-				resets++
-			}
-			prev = current
-		}
+	if len(floats) == 0 && len(histograms) == 0 {
+		return enh.Out, nil
 	}
 
-	if len(histograms) > 1 {
-		prev := histograms[0].H
-		for _, sample := range histograms[1:] {
-			current := sample.H
-			if current.DetectReset(prev) {
+	var prevSample, curSample Sample
+	for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); {
+		switch {
+		// Process a float sample if no histogram sample remains or its timestamp is earlier.
+		// Process a histogram sample if no float sample remains or its timestamp is earlier.
+		case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T:
+			curSample.F = floats[iFloat].F
+			curSample.H = nil
+			iFloat++
+		case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T:
+			curSample.H = histograms[iHistogram].H
+			iHistogram++
+		}
+		// Skip the comparison for the first sample, just initialize prevSample.
+		if iFloat+iHistogram == 1 {
+			prevSample = curSample
+			continue
+		}
+		switch {
+		case prevSample.H == nil && curSample.H == nil:
+			if curSample.F < prevSample.F {
+				resets++
+			}
+		case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil:
+			resets++
+		case prevSample.H != nil && curSample.H != nil:
+			if curSample.H.DetectReset(prevSample.H) {
 				resets++
 			}
-			prev = current
 		}
+		prevSample = curSample
 	}
 
 	return append(enh.Out, Sample{F: float64(resets)}), nil
 }
 
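funcResets above (and funcChanges below) now interleave float and histogram samples into a single timestamp-ordered stream with a two-pointer merge, so a transition between sample types is itself observable. The merge pattern in isolation (illustrative types, not the Prometheus API):

package main

import "fmt"

type fpoint struct {
	t int64
	v float64
}

// mergeTimestamps walks two timestamp-sorted slices in one ordered pass,
// the same two-pointer pattern used above to interleave float and
// histogram samples.
func mergeTimestamps(a, b []fpoint) []int64 {
	out := make([]int64, 0, len(a)+len(b))
	for i, j := 0, 0; i < len(a) || j < len(b); {
		switch {
		// Take from a if b is exhausted or a's next timestamp is earlier.
		case j >= len(b) || i < len(a) && a[i].t < b[j].t:
			out = append(out, a[i].t)
			i++
		default:
			out = append(out, b[j].t)
			j++
		}
	}
	return out
}

func main() {
	a := []fpoint{{1, 0}, {4, 0}, {6, 0}}
	b := []fpoint{{2, 0}, {3, 0}, {5, 0}}
	fmt.Println(mergeTimestamps(a, b)) // [1 2 3 4 5 6]
}
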
 // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcChanges(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	floats := vals[0].(Matrix)[0].Floats
+	histograms := vals[0].(Matrix)[0].Histograms
 	changes := 0
-
-	if len(floats) == 0 {
-		// TODO(beorn7): Only histogram values, still need to add support.
+	if len(floats) == 0 && len(histograms) == 0 {
 		return enh.Out, nil
 	}
 
-	prev := floats[0].F
-	for _, sample := range floats[1:] {
-		current := sample.F
-		if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) {
-			changes++
+	var prevSample, curSample Sample
+	for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); {
+		switch {
+		// Process a float sample if no histogram sample remains or its timestamp is earlier.
+		// Process a histogram sample if no float sample remains or its timestamp is earlier.
+		case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T:
+			curSample.F = floats[iFloat].F
+			curSample.H = nil
+			iFloat++
+		case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T:
+			curSample.H = histograms[iHistogram].H
+			iHistogram++
 		}
-		prev = current
+		// Skip the comparison for the first sample, just initialize prevSample.
+		if iFloat+iHistogram == 1 {
+			prevSample = curSample
+			continue
+		}
+		switch {
+		case prevSample.H == nil && curSample.H == nil:
+			if curSample.F != prevSample.F && !(math.IsNaN(curSample.F) && math.IsNaN(prevSample.F)) {
+				changes++
+			}
+		case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil:
+			changes++
+		case prevSample.H != nil && curSample.H != nil:
+			if !curSample.H.Equals(prevSample.H) {
+				changes++
+			}
+		}
+		prevSample = curSample
 	}
 
 	return append(enh.Out, Sample{F: float64(changes)}), nil
 }
 
 // label_replace function operates only on series; does not look at timestamps or values.
-func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) {
+func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
 	var (
 		dst  = stringFromArg(args[1])
 		repl = stringFromArg(args[2])
@@ -1377,15 +1572,15 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
 		regexStr = stringFromArg(args[4])
 	)
 
-	regex, err := regexp.Compile("^(?:" + regexStr + ")$")
+	regex, err := regexp.Compile("^(?s:" + regexStr + ")$")
 	if err != nil {
 		panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
 	}
-	if !model.LabelNameRE.MatchString(dst) {
+	if !model.LabelName(dst).IsValid() {
 		panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
 	}
 
-	val, ws := ev.eval(args[0])
+	val, ws := ev.eval(ctx, args[0])
 	matrix := val.(Matrix)
 	lb := labels.NewBuilder(labels.EmptyLabels())
 
@@ -1397,6 +1592,11 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
 			lb.Reset(el.Metric)
 			lb.Set(dst, string(res))
 			matrix[i].Metric = lb.Labels()
+			if dst == model.MetricNameLabel {
+				matrix[i].DropName = false
+			} else {
+				matrix[i].DropName = el.DropName
+			}
 		}
 	}
 	if matrix.ContainsSameLabelset() {
@@ -1406,13 +1606,8 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
 	return matrix, ws
 }
 
-// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) ===
-func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	panic("funcLabelReplace wrong implementation called")
-}
-
 // === Vector(s Scalar) (Vector, Annotations) ===
-func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcVector(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return append(enh.Out,
 		Sample{
 			Metric: labels.Labels{},
@@ -1421,7 +1616,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
 }
 
 // label_join function operates only on series; does not look at timestamps or values.
-func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) {
+func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
 	var (
 		dst = stringFromArg(args[1])
 		sep = stringFromArg(args[2])
@@ -1438,7 +1633,7 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
 		panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
 	}
 
-	val, ws := ev.eval(args[0])
+	val, ws := ev.eval(ctx, args[0])
 	matrix := val.(Matrix)
 	srcVals := make([]string, len(srcLabels))
 	lb := labels.NewBuilder(labels.EmptyLabels())
@@ -1451,16 +1646,20 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
 		lb.Reset(el.Metric)
 		lb.Set(dst, strval)
 		matrix[i].Metric = lb.Labels()
+
+		if dst == model.MetricNameLabel {
+			matrix[i].DropName = false
+		} else {
+			matrix[i].DropName = el.DropName
+		}
+	}
+	if matrix.ContainsSameLabelset() {
+		ev.errorf("vector cannot contain metrics with the same labelset")
 	}
 
 	return matrix, ws
 }
 
-// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) ===
-func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-	panic("funcLabelReplace wrong implementation called")
-}
-
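Note the anchoring change in evalLabelReplace above: the user-supplied regex is now wrapped as ^(?s:...)$ rather than ^(?:...)$, so . also matches newlines inside label values. A quick standalone demonstration of the difference:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	value := "line1\nline2"

	// Old anchoring: "." does not match the newline, so no match.
	oldRe := regexp.MustCompile("^(?:line1.line2)$")
	// New anchoring: the s flag makes "." match "\n" as well.
	newRe := regexp.MustCompile("^(?s:line1.line2)$")

	fmt.Println(oldRe.MatchString(value)) // false
	fmt.Println(newRe.MatchString(value)) // true
}
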
 // Common code for date related functions.
 func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector {
 	if len(vals) == 0 {
@@ -1472,66 +1671,74 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
 	}
 
 	for _, el := range vals[0].(Vector) {
+		if el.H != nil {
+			// Ignore histogram sample.
+			continue
+		}
 		t := time.Unix(int64(el.F), 0).UTC()
+		if !enh.enableDelayedNameRemoval {
+			el.Metric = el.Metric.DropReserved(schema.IsMetadataLabel)
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      f(t),
+			Metric:   el.Metric,
+			F:        f(t),
+			DropName: true,
 		})
 	}
 	return enh.Out
 }
 
 // === days_in_month(v Vector) Scalar ===
-func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcDaysInMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
 	}), nil
 }
 
 // === day_of_month(v Vector) Scalar ===
-func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcDayOfMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Day())
 	}), nil
 }
 
 // === day_of_week(v Vector) Scalar ===
-func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcDayOfWeek(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Weekday())
 	}), nil
 }
 
 // === day_of_year(v Vector) Scalar ===
-func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcDayOfYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.YearDay())
 	}), nil
 }
 
 // === hour(v Vector) Scalar ===
-func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcHour(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Hour())
 	}), nil
 }
 
 // === minute(v Vector) Scalar ===
-func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcMinute(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Minute())
 	}), nil
 }
 
 // === month(v Vector) Scalar ===
-func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Month())
 	}), nil
 }
 
 // === year(v Vector) Scalar ===
-func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return dateWrapper(vals, enh, func(t time.Time) float64 {
 		return float64(t.Year())
 	}), nil
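funcDaysInMonth above leans on time.Date's normalization of out-of-range days: day 32 always rolls over into the following month, so 32 minus the resulting day-of-month is the month's length. Worked standalone:

package main

import (
	"fmt"
	"time"
)

// daysInMonth uses time.Date's normalization: asking for day 32 rolls
// over into the next month, and the resulting day-of-month tells us how
// far past the end we went, so 32 minus that is the month's length.
func daysInMonth(year int, month time.Month) int {
	return 32 - time.Date(year, month, 32, 0, 0, 0, 0, time.UTC).Day()
}

func main() {
	fmt.Println(daysInMonth(2024, time.February)) // 29 (leap year)
	fmt.Println(daysInMonth(2025, time.February)) // 28
	fmt.Println(daysInMonth(2025, time.April))    // 30
}
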
@@ -1539,82 +1746,86 @@ func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
 
 // FunctionCalls is a list of all functions supported by PromQL, including their types.
 var FunctionCalls = map[string]FunctionCall{
-	"abs":                funcAbs,
-	"absent":             funcAbsent,
-	"absent_over_time":   funcAbsentOverTime,
-	"acos":               funcAcos,
-	"acosh":              funcAcosh,
-	"asin":               funcAsin,
-	"asinh":              funcAsinh,
-	"atan":               funcAtan,
-	"atanh":              funcAtanh,
-	"avg_over_time":      funcAvgOverTime,
-	"ceil":               funcCeil,
-	"changes":            funcChanges,
-	"clamp":              funcClamp,
-	"clamp_max":          funcClampMax,
-	"clamp_min":          funcClampMin,
-	"cos":                funcCos,
-	"cosh":               funcCosh,
-	"count_over_time":    funcCountOverTime,
-	"days_in_month":      funcDaysInMonth,
-	"day_of_month":       funcDayOfMonth,
-	"day_of_week":        funcDayOfWeek,
-	"day_of_year":        funcDayOfYear,
-	"deg":                funcDeg,
-	"delta":              funcDelta,
-	"deriv":              funcDeriv,
-	"exp":                funcExp,
-	"floor":              funcFloor,
-	"histogram_avg":      funcHistogramAvg,
-	"histogram_count":    funcHistogramCount,
-	"histogram_fraction": funcHistogramFraction,
-	"histogram_quantile": funcHistogramQuantile,
-	"histogram_sum":      funcHistogramSum,
-	"histogram_stddev":   funcHistogramStdDev,
-	"histogram_stdvar":   funcHistogramStdVar,
-	"holt_winters":       funcHoltWinters,
-	"hour":               funcHour,
-	"idelta":             funcIdelta,
-	"increase":           funcIncrease,
-	"irate":              funcIrate,
-	"label_replace":      funcLabelReplace,
-	"label_join":         funcLabelJoin,
-	"ln":                 funcLn,
-	"log10":              funcLog10,
-	"log2":               funcLog2,
-	"last_over_time":     funcLastOverTime,
-	"mad_over_time":      funcMadOverTime,
-	"max_over_time":      funcMaxOverTime,
-	"min_over_time":      funcMinOverTime,
-	"minute":             funcMinute,
-	"month":              funcMonth,
-	"pi":                 funcPi,
-	"predict_linear":     funcPredictLinear,
-	"present_over_time":  funcPresentOverTime,
-	"quantile_over_time": funcQuantileOverTime,
-	"rad":                funcRad,
-	"rate":               funcRate,
-	"resets":             funcResets,
-	"round":              funcRound,
-	"scalar":             funcScalar,
-	"sgn":                funcSgn,
-	"sin":                funcSin,
-	"sinh":               funcSinh,
-	"sort":               funcSort,
-	"sort_desc":          funcSortDesc,
-	"sort_by_label":      funcSortByLabel,
-	"sort_by_label_desc": funcSortByLabelDesc,
-	"sqrt":               funcSqrt,
-	"stddev_over_time":   funcStddevOverTime,
-	"stdvar_over_time":   funcStdvarOverTime,
-	"sum_over_time":      funcSumOverTime,
-	"tan":                funcTan,
-	"tanh":               funcTanh,
-	"time":               funcTime,
-	"timestamp":          funcTimestamp,
-	"vector":             funcVector,
-	"year":               funcYear,
+	"abs":                          funcAbs,
+	"absent":                       funcAbsent,
+	"absent_over_time":             funcAbsentOverTime,
+	"acos":                         funcAcos,
+	"acosh":                        funcAcosh,
+	"asin":                         funcAsin,
+	"asinh":                        funcAsinh,
+	"atan":                         funcAtan,
+	"atanh":                        funcAtanh,
+	"avg_over_time":                funcAvgOverTime,
+	"ceil":                         funcCeil,
+	"changes":                      funcChanges,
+	"clamp":                        funcClamp,
+	"clamp_max":                    funcClampMax,
+	"clamp_min":                    funcClampMin,
+	"cos":                          funcCos,
+	"cosh":                         funcCosh,
+	"count_over_time":              funcCountOverTime,
+	"days_in_month":                funcDaysInMonth,
+	"day_of_month":                 funcDayOfMonth,
+	"day_of_week":                  funcDayOfWeek,
+	"day_of_year":                  funcDayOfYear,
+	"deg":                          funcDeg,
+	"delta":                        funcDelta,
+	"deriv":                        funcDeriv,
+	"exp":                          funcExp,
+	"floor":                        funcFloor,
+	"histogram_avg":                funcHistogramAvg,
+	"histogram_count":              funcHistogramCount,
+	"histogram_fraction":           funcHistogramFraction,
+	"histogram_quantile":           funcHistogramQuantile,
+	"histogram_sum":                funcHistogramSum,
+	"histogram_stddev":             funcHistogramStdDev,
+	"histogram_stdvar":             funcHistogramStdVar,
+	"double_exponential_smoothing": funcDoubleExponentialSmoothing,
+	"hour":                         funcHour,
+	"idelta":                       funcIdelta,
+	"increase":                     funcIncrease,
+	"info":                         nil,
+	"irate":                        funcIrate,
+	"label_replace":                nil, // evalLabelReplace not called via this map.
+	"label_join":                   nil, // evalLabelJoin not called via this map.
+	"ln":                           funcLn,
+	"log10":                        funcLog10,
+	"log2":                         funcLog2,
+	"last_over_time":               funcLastOverTime,
+	"mad_over_time":                funcMadOverTime,
+	"max_over_time":                funcMaxOverTime,
+	"min_over_time":                funcMinOverTime,
+	"ts_of_last_over_time":         funcTsOfLastOverTime,
+	"ts_of_max_over_time":          funcTsOfMaxOverTime,
+	"ts_of_min_over_time":          funcTsOfMinOverTime,
+	"minute":                       funcMinute,
+	"month":                        funcMonth,
+	"pi":                           funcPi,
+	"predict_linear":               funcPredictLinear,
+	"present_over_time":            funcPresentOverTime,
+	"quantile_over_time":           funcQuantileOverTime,
+	"rad":                          funcRad,
+	"rate":                         funcRate,
+	"resets":                       funcResets,
+	"round":                        funcRound,
+	"scalar":                       funcScalar,
+	"sgn":                          funcSgn,
+	"sin":                          funcSin,
+	"sinh":                         funcSinh,
+	"sort":                         funcSort,
+	"sort_desc":                    funcSortDesc,
+	"sort_by_label":                funcSortByLabel,
+	"sort_by_label_desc":           funcSortByLabelDesc,
+	"sqrt":                         funcSqrt,
+	"stddev_over_time":             funcStddevOverTime,
+	"stdvar_over_time":             funcStdvarOverTime,
+	"sum_over_time":                funcSumOverTime,
+	"tan":                          funcTan,
+	"tanh":                         funcTanh,
+	"time":                         funcTime,
+	"timestamp":                    funcTimestamp,
+	"vector":                       funcVector,
+	"year":                         funcYear,
 }
 
 // AtModifierUnsafeFunctions are the functions whose result
@@ -1639,16 +1850,7 @@ func (s vectorByValueHeap) Len() int {
 }
 
 func (s vectorByValueHeap) Less(i, j int) bool {
-	// We compare histograms based on their sum of observations.
-	// TODO(beorn7): Is that what we want?
 	vi, vj := s[i].F, s[j].F
-	if s[i].H != nil {
-		vi = s[i].H.Sum
-	}
-	if s[j].H != nil {
-		vj = s[j].H.Sum
-	}
-
 	if math.IsNaN(vi) {
 		return true
 	}
@@ -1678,16 +1880,7 @@ func (s vectorByReverseValueHeap) Len() int {
 }
 
 func (s vectorByReverseValueHeap) Less(i, j int) bool {
-	// We compare histograms based on their sum of observations.
-	// TODO(beorn7): Is that what we want?
 	vi, vj := s[i].F, s[j].F
-	if s[i].H != nil {
-		vi = s[i].H.Sum
-	}
-	if s[j].H != nil {
-		vj = s[j].H.Sum
-	}
-
 	if math.IsNaN(vi) {
 		return true
 	}
diff --git a/promql/functions_test.go b/promql/functions_test.go
index aef59c8379..9ee0ba51dc 100644
--- a/promql/functions_test.go
+++ b/promql/functions_test.go
@@ -24,6 +24,7 @@ import (
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/promql/promqltest"
 	"github.com/prometheus/prometheus/util/teststorage"
 )
 
@@ -39,7 +40,7 @@ func TestDeriv(t *testing.T) {
 		MaxSamples: 10000,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)
 
 	a := storage.Appender(context.Background())
 
diff --git a/promql/fuzz.go b/promql/fuzz.go
index 3fd50b9496..362b33301d 100644
--- a/promql/fuzz.go
+++ b/promql/fuzz.go
@@ -61,8 +61,8 @@ const (
 var symbolTable = labels.NewSymbolTable()
 
 func fuzzParseMetricWithContentType(in []byte, contentType string) int {
-	p, warning := textparse.New(in, contentType, false, symbolTable)
-	if warning != nil {
+	p, warning := textparse.New(in, contentType, "", false, false, false, symbolTable)
+	if p == nil || warning != nil {
 		// An invalid content type is being passed, which should not happen
 		// in this context.
 		panic(warning)
@@ -91,7 +91,7 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int {
 // Note that this is not the parser for the text-based exposition-format; that
 // lives in github.com/prometheus/client_golang/text.
 func FuzzParseMetric(in []byte) int {
-	return fuzzParseMetricWithContentType(in, "")
+	return fuzzParseMetricWithContentType(in, "text/plain")
 }
 
 func FuzzParseOpenMetric(in []byte) int {
diff --git a/promql/fuzz_test.go b/promql/fuzz_test.go
index 1f0bbaa662..4a26798ded 100644
--- a/promql/fuzz_test.go
+++ b/promql/fuzz_test.go
@@ -29,7 +29,7 @@ func TestfuzzParseMetricWithContentTypePanicOnInvalid(t *testing.T) {
 		} else {
 			err, ok := p.(error)
 			require.True(t, ok)
-			require.Contains(t, err.Error(), "duplicate parameter name")
+			require.ErrorContains(t, err, "duplicate parameter name")
 		}
 	}()
 
diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go
index dfafea5f8c..cbc717cac0 100644
--- a/promql/histogram_stats_iterator.go
+++ b/promql/histogram_stats_iterator.go
@@ -19,7 +19,11 @@ import (
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
-type histogramStatsIterator struct {
+// HistogramStatsIterator is an iterator that returns histogram objects
+// which have only their sum and count values populated. The iterator handles
+// counter reset detection internally and sets the counter reset hint accordingly
+// in each returned histogram object.
+type HistogramStatsIterator struct {
 	chunkenc.Iterator
 
 	currentH *histogram.Histogram
@@ -27,28 +31,33 @@ type histogramStatsIterator struct {
 
 	currentFH *histogram.FloatHistogram
 	lastFH    *histogram.FloatHistogram
+
+	currentSeriesRead bool
 }
 
-// NewHistogramStatsIterator creates an iterator which returns histogram objects
-// which have only their sum and count values populated. The iterator handles
-// counter reset detection internally and sets the counter reset hint accordingly
-// in each returned histogram objects.
-func NewHistogramStatsIterator(it chunkenc.Iterator) chunkenc.Iterator {
-	return &histogramStatsIterator{
+// NewHistogramStatsIterator creates a new HistogramStatsIterator.
+func NewHistogramStatsIterator(it chunkenc.Iterator) *HistogramStatsIterator {
+	return &HistogramStatsIterator{
 		Iterator:  it,
 		currentH:  &histogram.Histogram{},
 		currentFH: &histogram.FloatHistogram{},
 	}
 }
 
+// Reset resets this iterator for use with a new underlying iterator, reusing
+// objects already allocated where possible.
+func (f *HistogramStatsIterator) Reset(it chunkenc.Iterator) {
+	f.Iterator = it
+	f.currentSeriesRead = false
+}
+
 // AtHistogram returns the next timestamp/histogram pair. The counter reset
 // detection is guaranteed to be correct only when the caller does not switch
 // between AtHistogram and AtFloatHistogram calls.
-func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
+func (f *HistogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
 	var t int64
 	t, f.currentH = f.Iterator.AtHistogram(f.currentH)
 	if value.IsStaleNaN(f.currentH.Sum) {
-		f.setLastH(f.currentH)
 		h = &histogram.Histogram{Sum: f.currentH.Sum}
 		return t, h
 	}
@@ -63,9 +72,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
 		return t, h
 	}
 
-	h.CounterResetHint = f.getResetHint(f.currentH)
-	h.Count = f.currentH.Count
-	h.Sum = f.currentH.Sum
+	returnValue := histogram.Histogram{
+		CounterResetHint: f.getResetHint(f.currentH),
+		Count:            f.currentH.Count,
+		Sum:              f.currentH.Sum,
+	}
+	returnValue.CopyTo(h)
+
 	f.setLastH(f.currentH)
 	return t, h
 }
@@ -73,11 +86,10 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
 // AtFloatHistogram returns the next timestamp/float histogram pair. The counter
 // reset detection is guaranteed to be correct only when the caller does not
 // switch between AtHistogram and AtFloatHistogram calls.
-func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+func (f *HistogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
 	var t int64
 	t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
 	if value.IsStaleNaN(f.currentFH.Sum) {
-		f.setLastFH(f.currentFH)
 		return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
 	}
 
@@ -91,52 +103,72 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
 		return t, fh
 	}
 
-	fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint)
-	fh.Count = f.currentFH.Count
-	fh.Sum = f.currentFH.Sum
+	returnValue := histogram.FloatHistogram{
+		CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
+		Count:            f.currentFH.Count,
+		Sum:              f.currentFH.Sum,
+	}
+	returnValue.CopyTo(fh)
+
 	f.setLastFH(f.currentFH)
 	return t, fh
 }
 
-func (f *histogramStatsIterator) setLastH(h *histogram.Histogram) {
+func (f *HistogramStatsIterator) setLastH(h *histogram.Histogram) {
+	f.lastFH = nil
 	if f.lastH == nil {
 		f.lastH = h.Copy()
 	} else {
 		h.CopyTo(f.lastH)
 	}
+
+	f.currentSeriesRead = true
 }
 
-func (f *histogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) {
+func (f *HistogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) {
+	f.lastH = nil
 	if f.lastFH == nil {
 		f.lastFH = fh.Copy()
 	} else {
 		fh.CopyTo(f.lastFH)
 	}
+
+	f.currentSeriesRead = true
 }
 
-func (f *histogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint {
+func (f *HistogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint {
 	if hint != histogram.UnknownCounterReset {
 		return hint
 	}
-	if f.lastFH == nil {
-		return histogram.NotCounterReset
+	prevFH := f.lastFH
+	if prevFH == nil || !f.currentSeriesRead {
+		if f.lastH == nil || !f.currentSeriesRead {
+			// We don't know if there's a counter reset.
+			return histogram.UnknownCounterReset
+		}
+		prevFH = f.lastH.ToFloat(nil)
 	}
-
-	if f.currentFH.DetectReset(f.lastFH) {
+	if f.currentFH.DetectReset(prevFH) {
 		return histogram.CounterReset
 	}
 	return histogram.NotCounterReset
 }
 
-func (f *histogramStatsIterator) getResetHint(h *histogram.Histogram) histogram.CounterResetHint {
+func (f *HistogramStatsIterator) getResetHint(h *histogram.Histogram) histogram.CounterResetHint {
 	if h.CounterResetHint != histogram.UnknownCounterReset {
 		return h.CounterResetHint
 	}
-	if f.lastH == nil {
-		return histogram.NotCounterReset
+	var prevFH *histogram.FloatHistogram
+	if f.lastH == nil || !f.currentSeriesRead {
+		if f.lastFH == nil || !f.currentSeriesRead {
+			// We don't know if there's a counter reset.
+			return histogram.UnknownCounterReset
+		}
+		prevFH = f.lastFH
+	} else {
+		prevFH = f.lastH.ToFloat(nil)
 	}
-
-	fh, prevFH := h.ToFloat(nil), f.lastH.ToFloat(nil)
+	fh := h.ToFloat(nil)
 	if fh.DetectReset(prevFH) {
 		return histogram.CounterReset
 	}
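The hint logic above changes the first-sample answer from NotCounterReset to UnknownCounterReset and only falls back to DetectReset against the remembered previous histogram once one has actually been read (currentSeriesRead). A loose scalar analogy of that policy, simplified to plain cumulative counts (illustrative only; the real code compares whole histograms via DetectReset):

package main

import "fmt"

// resetHints mimics the hint policy on plain counters: the first
// sample's reset state is unknown; afterwards a drop in the cumulative
// count signals a counter reset.
func resetHints(counts []float64) []string {
	hints := make([]string, len(counts))
	seen := false
	var prev float64
	for i, c := range counts {
		switch {
		case !seen:
			hints[i] = "unknown"
		case c < prev:
			hints[i] = "reset"
		default:
			hints[i] = "not-reset"
		}
		prev, seen = c, true
	}
	return hints
}

func main() {
	fmt.Println(resetHints([]float64{5, 7, 2, 3}))
	// [unknown not-reset reset not-reset]
}
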
after stale NaN", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + {Sum: math.Float64frombits(value.StaleNaN)}, + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.UnknownCounterReset, + histogram.UnknownCounterReset, + histogram.CounterReset, + }, + }, } - histograms[0].CounterResetHint = histogram.NotCounterReset - histograms[1].CounterResetHint = histogram.UnknownCounterReset - histograms[2].CounterResetHint = histogram.CounterReset - histograms[3].CounterResetHint = histogram.UnknownCounterReset + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Run("histogram_stats", func(t *testing.T) { + check := func(statsIterator *HistogramStatsIterator) { + decodedStats := make([]*histogram.Histogram, 0) + + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtHistogram(nil) + decodedStats = append(decodedStats, h) + } + + for i := 0; i < len(tc.histograms); i++ { + require.Equalf(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, "mismatch in counter reset hint for histogram %d", i) + h := tc.histograms[i] + if value.IsStaleNaN(h.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, uint64(0), decodedStats[i].Count) + } else { + require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count) + require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum) + } + } + } + + // Check that we get the expected results with a fresh iterator. + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + check(statsIterator) + + // Check that we get the same results if we reset and reuse that iterator. + statsIterator.Reset(newHistogramSeries(tc.histograms).Iterator(nil)) + check(statsIterator) + }) + t.Run("float_histogram_stats", func(t *testing.T) { + check := func(statsIterator *HistogramStatsIterator) { + decodedStats := make([]*histogram.FloatHistogram, 0) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtFloatHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint) + fh := tc.histograms[i].ToFloat(nil) + if value.IsStaleNaN(fh.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, float64(0), decodedStats[i].Count) + } else { + require.Equal(t, fh.Count, decodedStats[i].Count) + require.Equal(t, fh.Sum, decodedStats[i].Sum) + } + } + } + + // Check that we get the expected results with a fresh iterator. + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + check(statsIterator) + + // Check that we get the same results if we reset and reuse that iterator. 
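+				// Reusing the iterator must not leak counter-reset state from the first pass.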
+ statsIterator.Reset(newHistogramSeries(tc.histograms).Iterator(nil)) + check(statsIterator) + }) + }) + } +} + +func TestHistogramStatsMixedUse(t *testing.T) { + histograms := []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + tsdbutil.GenerateTestHistogramWithHint(4, histogram.UnknownCounterReset), + tsdbutil.GenerateTestHistogramWithHint(0, histogram.UnknownCounterReset), + } + + series := newHistogramSeries(histograms) + it := series.Iterator(nil) + + statsIterator := NewHistogramStatsIterator(it) expectedHints := []histogram.CounterResetHint{ - histogram.NotCounterReset, + histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.CounterReset, - histogram.NotCounterReset, } + actualHints := make([]histogram.CounterResetHint, 3) + typ := statsIterator.Next() + require.Equal(t, chunkenc.ValHistogram, typ) + _, h := statsIterator.AtHistogram(nil) + actualHints[0] = h.CounterResetHint + typ = statsIterator.Next() + require.Equal(t, chunkenc.ValHistogram, typ) + _, h = statsIterator.AtHistogram(nil) + actualHints[1] = h.CounterResetHint + typ = statsIterator.Next() + require.Equal(t, chunkenc.ValHistogram, typ) + _, fh := statsIterator.AtFloatHistogram(nil) + actualHints[2] = fh.CounterResetHint - t.Run("histogram_stats", func(t *testing.T) { - decodedStats := make([]*histogram.Histogram, 0) - statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtHistogram(nil) - decodedStats = append(decodedStats, h) - } - for i := 0; i < len(histograms); i++ { - require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) - require.Equal(t, histograms[i].Count, decodedStats[i].Count) - require.Equal(t, histograms[i].Sum, decodedStats[i].Sum) - } - }) - t.Run("float_histogram_stats", func(t *testing.T) { - decodedStats := make([]*histogram.FloatHistogram, 0) - statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtFloatHistogram(nil) - decodedStats = append(decodedStats, h) - } - for i := 0; i < len(histograms); i++ { - fh := histograms[i].ToFloat(nil) - require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) - require.Equal(t, fh.Count, decodedStats[i].Count) - require.Equal(t, fh.Sum, decodedStats[i].Sum) - } - }) + require.Equal(t, chunkenc.ValNone, statsIterator.Next()) + require.Equal(t, expectedHints, actualHints) } type histogramSeries struct { @@ -104,7 +243,7 @@ func (h *histogramIterator) Next() chunkenc.ValueType { return chunkenc.ValNone } -func (h *histogramIterator) Seek(t int64) chunkenc.ValueType { panic("not implemented") } +func (h *histogramIterator) Seek(_ int64) chunkenc.ValueType { panic("not implemented") } func (h *histogramIterator) At() (int64, float64) { panic("not implemented") } diff --git a/promql/info.go b/promql/info.go new file mode 100644 index 0000000000..0197330822 --- /dev/null +++ b/promql/info.go @@ -0,0 +1,454 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"slices"
+	"strings"
+
+	"github.com/grafana/regexp"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/annotations"
+)
+
+const targetInfo = "target_info"
+
+// identifyingLabels are the labels we consider as identifying for info metrics.
+// Currently hard-coded, so we don't need knowledge of individual info metrics.
+var identifyingLabels = []string{"instance", "job"}
+
+// evalInfo implements the info PromQL function.
+func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
+	val, annots := ev.eval(ctx, args[0])
+	mat := val.(Matrix)
+	// Map from data label name to matchers.
+	dataLabelMatchers := map[string][]*labels.Matcher{}
+	var infoNameMatchers []*labels.Matcher
+	if len(args) > 1 {
+		// TODO: Introduce a dedicated LabelSelector type.
+		labelSelector := args[1].(*parser.VectorSelector)
+		for _, m := range labelSelector.LabelMatchers {
+			dataLabelMatchers[m.Name] = append(dataLabelMatchers[m.Name], m)
+			if m.Name == labels.MetricName {
+				infoNameMatchers = append(infoNameMatchers, m)
+			}
+		}
+	} else {
+		infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}
+	}
+
+	// Don't try to enrich info series.
+	ignoreSeries := map[int]struct{}{}
+loop:
+	for i, s := range mat {
+		name := s.Metric.Get(labels.MetricName)
+		for _, m := range infoNameMatchers {
+			if m.Matches(name) {
+				ignoreSeries[i] = struct{}{}
+				continue loop
+			}
+		}
+	}
+
+	selectHints := ev.infoSelectHints(args[0])
+	infoSeries, ws, err := ev.fetchInfoSeries(ctx, mat, ignoreSeries, dataLabelMatchers, selectHints)
+	if err != nil {
+		ev.error(err)
+	}
+	annots.Merge(ws)
+
+	res, ws := ev.combineWithInfoSeries(ctx, mat, infoSeries, ignoreSeries, dataLabelMatchers)
+	annots.Merge(ws)
+	return res, annots
+}
+
+// infoSelectHints calculates the storage.SelectHints for selecting info series, given expr (first argument to info call).
+func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
+	var nodeTimestamp *int64
+	var offset int64
+	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
+		switch n := node.(type) {
+		case *parser.VectorSelector:
+			if n.Timestamp != nil {
+				nodeTimestamp = n.Timestamp
+			}
+			offset = durationMilliseconds(n.OriginalOffset)
+			return errors.New("end traversal")
+		default:
+			return nil
+		}
+	})
+
+	start := ev.startTimestamp
+	end := ev.endTimestamp
+	if nodeTimestamp != nil {
+		// The timestamp on the selector overrides everything.
+		start = *nodeTimestamp
+		end = *nodeTimestamp
+	}
+	// Reduce the start by one fewer ms than the lookback delta
+	// because we want to exclude samples that are precisely the
+	// lookback delta before the eval time.
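+	// For example, with the default 5m lookback delta, a sample exactly 5m
+	// before the eval time is excluded, while a sample 5m minus 1ms before
+	// it is still included.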
+ start -= durationMilliseconds(ev.lookbackDelta) - 1 + start -= offset + end -= offset + + return storage.SelectHints{ + Start: start, + End: end, + Step: ev.interval, + Func: "info", + } +} + +// fetchInfoSeries fetches info series given matching identifying labels in mat. +// Series in ignoreSeries are not fetched. +// dataLabelMatchers may be mutated. +func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) { + // A map of values for all identifying labels we are interested in. + idLblValues := map[string]map[string]struct{}{} + for i, s := range mat { + if _, exists := ignoreSeries[i]; exists { + continue + } + + // Register relevant values per identifying label for this series. + for _, l := range identifyingLabels { + val := s.Metric.Get(l) + if val == "" { + continue + } + + if idLblValues[l] == nil { + idLblValues[l] = map[string]struct{}{} + } + idLblValues[l][val] = struct{}{} + } + } + if len(idLblValues) == 0 { + return nil, nil, nil + } + + // Generate regexps for every interesting value per identifying label. + var sb strings.Builder + idLblRegexps := make(map[string]string, len(idLblValues)) + for name, vals := range idLblValues { + sb.Reset() + i := 0 + for v := range vals { + if i > 0 { + sb.WriteRune('|') + } + sb.WriteString(regexp.QuoteMeta(v)) + i++ + } + idLblRegexps[name] = sb.String() + } + + var infoLabelMatchers []*labels.Matcher + for name, re := range idLblRegexps { + infoLabelMatchers = append(infoLabelMatchers, labels.MustNewMatcher(labels.MatchRegexp, name, re)) + } + var nameMatcher *labels.Matcher + for name, ms := range dataLabelMatchers { + for i, m := range ms { + if m.Name == labels.MetricName { + nameMatcher = m + ms = slices.Delete(ms, i, i+1) + } + infoLabelMatchers = append(infoLabelMatchers, m) + } + if len(ms) > 0 { + dataLabelMatchers[name] = ms + } else { + delete(dataLabelMatchers, name) + } + } + if nameMatcher == nil { + // Default to using the target_info metric. + infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}, infoLabelMatchers...) + } + + infoIt := ev.querier.Select(ctx, false, &selectHints, infoLabelMatchers...) + infoSeries, ws, err := expandSeriesSet(ctx, infoIt) + if err != nil { + return nil, ws, err + } + + infoMat := ev.evalSeries(ctx, infoSeries, 0, true) + return infoMat, ws, nil +} + +// combineWithInfoSeries combines mat with select data labels from infoMat. 
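+// It matches base and info samples per timestamp on their identifying labels
+// (plus the info metric name) and copies the selected data labels from the
+// matching info series onto the base series' labelset.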
+func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher) (Matrix, annotations.Annotations) { + buf := make([]byte, 0, 1024) + lb := labels.NewScratchBuilder(0) + sigFunction := func(name string) func(labels.Labels) string { + return func(lset labels.Labels) string { + lb.Reset() + lb.Add(labels.MetricName, name) + lset.MatchLabels(true, identifyingLabels...).Range(func(l labels.Label) { + lb.Add(l.Name, l.Value) + }) + lb.Sort() + return string(lb.Labels().Bytes(buf)) + } + } + + infoMetrics := map[string]struct{}{} + for _, is := range infoMat { + lblMap := is.Metric.Map() + infoMetrics[lblMap[labels.MetricName]] = struct{}{} + } + sigfs := make(map[string]func(labels.Labels) string, len(infoMetrics)) + for name := range infoMetrics { + sigfs[name] = sigFunction(name) + } + + // Keep a copy of the original point slices so they can be returned to the pool. + origMatrices := []Matrix{ + make(Matrix, len(mat)), + make(Matrix, len(infoMat)), + } + copy(origMatrices[0], mat) + copy(origMatrices[1], infoMat) + + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + originalNumSamples := ev.currentSamples + + // Create an output vector that is as big as the input matrix with + // the most time series. + biggestLen := max(len(mat), len(infoMat)) + baseVector := make(Vector, 0, len(mat)) + infoVector := make(Vector, 0, len(infoMat)) + enh := &EvalNodeHelper{ + Out: make(Vector, 0, biggestLen), + } + type seriesAndTimestamp struct { + Series + ts int64 + } + seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash. + tempNumSamples := ev.currentSamples + + // For every base series, compute signature per info metric. + baseSigs := make([]map[string]string, 0, len(mat)) + for _, s := range mat { + sigs := make(map[string]string, len(infoMetrics)) + for infoName := range infoMetrics { + sigs[infoName] = sigfs[infoName](s.Metric) + } + baseSigs = append(baseSigs, sigs) + } + + infoSigs := make([]string, 0, len(infoMat)) + for _, s := range infoMat { + name := s.Metric.Map()[labels.MetricName] + infoSigs = append(infoSigs, sigfs[name](s.Metric)) + } + + var warnings annotations.Annotations + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + // Reset number of samples in memory after each timestamp. + ev.currentSamples = tempNumSamples + // Gather input vectors for this timestamp. + baseVector, _ = ev.gatherVector(ts, mat, baseVector, nil, nil) + infoVector, _ = ev.gatherVector(ts, infoMat, infoVector, nil, nil) + + enh.Ts = ts + result, err := ev.combineWithInfoVector(baseVector, infoVector, ignoreSeries, baseSigs, infoSigs, enh, dataLabelMatchers) + if err != nil { + ev.error(err) + } + enh.Out = result[:0] // Reuse result vector. + + vecNumSamples := result.TotalSamples() + ev.currentSamples += vecNumSamples + // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also + // needs to include the samples from the result here, as they're still in memory. + tempNumSamples += vecNumSamples + ev.samplesStats.UpdatePeak(ev.currentSamples) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + + // Add samples in result vector to output series. 
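+		// Output series are keyed by labelset hash; two result samples with
+		// the same hash at the same timestamp are duplicates of each other.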
+ for _, sample := range result { + h := sample.Metric.Hash() + ss, exists := seriess[h] + if exists { + if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate. + ev.errorf("vector cannot contain metrics with the same labelset") + } + ss.ts = ts + } else { + ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} + } + addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) + seriess[h] = ss + } + } + + // Reuse the original point slices. + for _, m := range origMatrices { + for _, s := range m { + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) + } + } + // Assemble the output matrix. By the time we get here we know we don't have too many samples. + numSamples := 0 + output := make(Matrix, 0, len(seriess)) + for _, ss := range seriess { + numSamples += len(ss.Floats) + totalHPointSize(ss.Histograms) + output = append(output, ss.Series) + } + ev.currentSamples = originalNumSamples + numSamples + ev.samplesStats.UpdatePeak(ev.currentSamples) + return output, warnings +} + +// combineWithInfoVector combines base and info Vectors. +// Base series in ignoreSeries are not combined. +func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[int]struct{}, baseSigs []map[string]string, infoSigs []string, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher) (Vector, error) { + if len(base) == 0 { + return nil, nil // Short-circuit: nothing is going to match. + } + + // All samples from the info Vector hashed by the matching label/values. + if enh.rightSigs == nil { + enh.rightSigs = make(map[string]Sample, len(enh.Out)) + } else { + clear(enh.rightSigs) + } + + for i, s := range info { + if s.H != nil { + ev.error(errors.New("info sample should be float")) + } + // We encode original info sample timestamps via the float value. + origT := int64(s.F) + + sig := infoSigs[i] + if existing, exists := enh.rightSigs[sig]; exists { + // We encode original info sample timestamps via the float value. + existingOrigT := int64(existing.F) + switch { + case existingOrigT > origT: + // Keep the other info sample, since it's newer. + case existingOrigT < origT: + // Keep this info sample, since it's newer. + enh.rightSigs[sig] = s + default: + // The two info samples have the same timestamp - conflict. + name := s.Metric.Map()[labels.MetricName] + ev.errorf("found duplicate series for info metric %s", name) + } + } else { + enh.rightSigs[sig] = s + } + } + + for i, bs := range base { + if _, exists := ignoreSeries[i]; exists { + // This series should not be enriched with info metric data labels. + enh.Out = append(enh.Out, Sample{ + Metric: bs.Metric, + F: bs.F, + H: bs.H, + }) + continue + } + + baseLabels := bs.Metric.Map() + enh.resetBuilder(labels.Labels{}) + + // For every info metric name, try to find an info series with the same signature. + seenInfoMetrics := map[string]struct{}{} + for infoName, sig := range baseSigs[i] { + is, exists := enh.rightSigs[sig] + if !exists { + continue + } + if _, exists := seenInfoMetrics[infoName]; exists { + continue + } + + err := is.Metric.Validate(func(l labels.Label) error { + if l.Name == labels.MetricName { + return nil + } + if _, exists := dataLabelMatchers[l.Name]; len(dataLabelMatchers) > 0 && !exists { + // Not among the specified data label matchers. 
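+					// Skip this label rather than copying it onto the base series.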
+					return nil
+				}
+
+				if v := enh.lb.Get(l.Name); v != "" && v != l.Value {
+					return fmt.Errorf("conflicting label: %s", l.Name)
+				}
+				if _, exists := baseLabels[l.Name]; exists {
+					// Skip labels already on the base metric.
+					return nil
+				}
+
+				enh.lb.Set(l.Name, l.Value)
+				return nil
+			})
+			if err != nil {
+				return nil, err
+			}
+			seenInfoMetrics[infoName] = struct{}{}
+		}
+
+		infoLbls := enh.lb.Labels()
+		if infoLbls.Len() == 0 {
+			// If there's at least one data label matcher not matching the empty string,
+			// we have to ignore this series as there are no matching info series.
+			allMatchersMatchEmpty := true
+			for _, ms := range dataLabelMatchers {
+				for _, m := range ms {
+					if !m.Matches("") {
+						allMatchersMatchEmpty = false
+						break
+					}
+				}
+			}
+			if !allMatchersMatchEmpty {
+				continue
+			}
+		}
+
+		enh.resetBuilder(bs.Metric)
+		infoLbls.Range(func(l labels.Label) {
+			enh.lb.Set(l.Name, l.Value)
+		})
+
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.lb.Labels(),
+			F:      bs.F,
+			H:      bs.H,
+		})
+	}
+	return enh.Out, nil
+}
diff --git a/promql/info_test.go b/promql/info_test.go
new file mode 100644
index 0000000000..2e7a67172f
--- /dev/null
+++ b/promql/info_test.go
@@ -0,0 +1,140 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql_test
+
+import (
+	"testing"
+
+	"github.com/prometheus/prometheus/promql/promqltest"
+)
+
+// The "info" function is experimental. This is why we write these tests here for now instead of promqltest/testdata/info.test.
+func TestInfo(t *testing.T) {
+	engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
+	promqltest.RunTest(t, `
+load 5m
+  metric{instance="a", job="1", label="value"} 0 1 2
+  metric_not_matching_target_info{instance="a", job="2", label="value"} 0 1 2
+  metric_with_overlapping_label{instance="a", job="1", label="value", data="base"} 0 1 2
+  target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 1
+  build_info{instance="a", job="1", build_data="build"} 1 1 1
+
+# Include one info metric data label.
+eval range from 0m to 10m step 5m info(metric, {data=~".+"})
+  metric{data="info", instance="a", job="1", label="value"} 0 1 2
+
+# Include all info metric data labels.
+eval range from 0m to 10m step 5m info(metric)
+  metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 2
+
+# Try including all info metric data labels, but with non-matching identifying labels.
+eval range from 0m to 10m step 5m info(metric_not_matching_target_info)
+  metric_not_matching_target_info{instance="a", job="2", label="value"} 0 1 2
+
+# Try including a certain info metric data label with a non-matching matcher not accepting empty labels.
+# The metric is ignored, due to there being a data label matcher not matching empty labels,
+# and there being no info series matches.
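+# Hence the expected result of the following eval is empty.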
+eval range from 0m to 10m step 5m info(metric, {non_existent=~".+"})
+
+# Include a certain info metric data label together with a non-matching matcher accepting empty labels.
+# Since the non_existent matcher matches empty labels, it's simply ignored when there's no match.
+# XXX: This case has to include a matcher not matching empty labels, due to the PromQL limitation
+# that vector selectors have to contain at least one matcher not accepting empty labels.
+# We might need another construct than a vector selector to get around this limitation.
+eval range from 0m to 10m step 5m info(metric, {data=~".+", non_existent=~".*"})
+  metric{data="info", instance="a", job="1", label="value"} 0 1 2
+
+# Info series data labels overlapping with those of base series are ignored.
+eval range from 0m to 10m step 5m info(metric_with_overlapping_label)
+  metric_with_overlapping_label{data="base", instance="a", job="1", label="value", another_data="another info"} 0 1 2
+
+# Include data labels from target_info specifically.
+eval range from 0m to 10m step 5m info(metric, {__name__="target_info"})
+  metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 2
+
+# Try to include all data labels from a non-existent info metric.
+eval range from 0m to 10m step 5m info(metric, {__name__="non_existent"})
+  metric{instance="a", job="1", label="value"} 0 1 2
+
+# Try to include a certain data label from a non-existent info metric.
+eval range from 0m to 10m step 5m info(metric, {__name__="non_existent", data=~".+"})
+
+# Include data labels from build_info.
+eval range from 0m to 10m step 5m info(metric, {__name__="build_info"})
+  metric{instance="a", job="1", label="value", build_data="build"} 0 1 2
+
+# Include data labels from build_info and target_info.
+eval range from 0m to 10m step 5m info(metric, {__name__=~".+_info"})
+  metric{instance="a", job="1", label="value", build_data="build", data="info", another_data="another info"} 0 1 2
+
+# Info metrics themselves are ignored when it comes to enriching with info metric data labels.
+eval range from 0m to 10m step 5m info(build_info, {__name__=~".+_info", build_data=~".+"})
+  build_info{instance="a", job="1", build_data="build"} 1 1 1
+
+clear
+
+# Overlapping target_info series.
+load 5m
+  metric{instance="a", job="1", label="value"} 0 1 2
+  target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 _
+  target_info{instance="a", job="1", data="updated info", another_data="another info"} _ _ 1
+
+# Conflicting info series are resolved through picking the latest sample.
+eval range from 0m to 10m step 5m info(metric)
+  metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 _
+  metric{data="updated info", instance="a", job="1", label="value", another_data="another info"} _ _ 2
+
+clear
+
+# Non-overlapping target_info series.
+load 5m
+  metric{instance="a", job="1", label="value"} 0 1 2
+  target_info{instance="a", job="1", data="info"} 1 1 stale
+  target_info{instance="a", job="1", data="updated info"} _ _ 1
+
+# Include info metric data labels from a metric whose data labels change over time.
+eval range from 0m to 10m step 5m info(metric)
+  metric{data="info", instance="a", job="1", label="value"} 0 1 _
+  metric{data="updated info", instance="a", job="1", label="value"} _ _ 2
+
+clear
+
+# Info series selector matches histogram series; info metrics should be of float type.
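+# The eval_fail below asserts that info() rejects histogram-typed info samples.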
+load 5m
+  metric{instance="a", job="1", label="value"} 0 1 2
+  histogram{instance="a", job="1"} {{schema:1 sum:3 count:22 buckets:[5 10 7]}}
+
+eval_fail range from 0m to 10m step 5m info(metric, {__name__="histogram"})
+
+clear
+
+# Series with skipped scrape.
+load 1m
+  metric{instance="a", job="1", label="value"} 0 _ 2 3 4
+  target_info{instance="a", job="1", data="info"} 1 _ 1 1 1
+
+# Lookback works also for the info series.
+eval range from 1m to 4m step 1m info(metric)
+  metric{data="info", instance="a", job="1", label="value"} 0 2 3 4
+
+# @ operator works also with info.
+# Note that we pick a timestamp that is missing a sample; lookback should pick the previous sample.
+eval range from 1m to 4m step 1m info(metric @ 60)
+  metric{data="info", instance="a", job="1", label="value"} 0 0 0 0
+
+# offset operator works also with info.
+eval range from 1m to 4m step 1m info(metric offset 1m)
+  metric{data="info", instance="a", job="1", label="value"} 0 0 2 3
+`, engine)
+}
diff --git a/promql/parser/ast.go b/promql/parser/ast.go
index 830e8a2c5e..dc3e36b5b5 100644
--- a/promql/parser/ast.go
+++ b/promql/parser/ast.go
@@ -19,9 +19,8 @@ import (
 	"time"
 
 	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/storage"
-
 	"github.com/prometheus/prometheus/promql/parser/posrange"
+	"github.com/prometheus/prometheus/storage"
 )
 
 // Node is a generic interface for all nodes in an AST.
@@ -111,6 +110,16 @@ type BinaryExpr struct {
 	ReturnBool bool
 }
 
+// DurationExpr represents a binary expression between two duration expressions.
+type DurationExpr struct {
+	Op       ItemType // The operation of the expression.
+	LHS, RHS Expr     // The operands on the respective sides of the operator.
+	Wrapped  bool     // Set when the duration is wrapped in parentheses.
+
+	StartPos posrange.Pos // For unary operations and step(), the start position of the operator.
+	EndPos   posrange.Pos // For step(), the end position of the operator.
+}
+
 // Call represents a function call.
 type Call struct {
 	Func *Function // The function that was called.
@@ -125,24 +134,27 @@ type MatrixSelector struct {
 	// if the parser hasn't returned an error.
 	VectorSelector Expr
 	Range          time.Duration
-
-	EndPos posrange.Pos
+	RangeExpr *DurationExpr
+	EndPos    posrange.Pos
 }
 
 // SubqueryExpr represents a subquery.
 type SubqueryExpr struct {
-	Expr  Expr
-	Range time.Duration
+	Expr      Expr
+	Range     time.Duration
+	RangeExpr *DurationExpr
 	// OriginalOffset is the actual offset that was set in the query.
-	// This never changes.
 	OriginalOffset time.Duration
+	// OriginalOffsetExpr is the actual offset expression that was set in the query.
+	OriginalOffsetExpr *DurationExpr
 	// Offset is the offset used during the query execution
-	// which is calculated using the original offset, at modifier time,
+	// which is calculated using the original offset, offset expression, at modifier time,
 	// eval time, and subquery offsets in the AST tree.
 	Offset     time.Duration
 	Timestamp  *int64
 	StartOrEnd ItemType // Set when @ is used with start() or end()
 	Step       time.Duration
+	StepExpr   *DurationExpr
 
 	EndPos posrange.Pos
 }
@@ -151,6 +163,7 @@ type SubqueryExpr struct {
 
 type NumberLiteral struct {
 	Val float64
+	Duration bool // Used to format the number as a duration.
 
 	PosRange posrange.PositionRange
 }
@@ -192,9 +205,10 @@ func (e *StepInvariantExpr) PositionRange() posrange.PositionRange {
 
 // VectorSelector represents a Vector selection.
 type VectorSelector struct {
 	Name string
-	// OriginalOffset is the actual offset that was set in the query.
-	// This never changes.
+	// OriginalOffset is the actual offset calculated from OriginalOffsetExpr.
 	OriginalOffset time.Duration
+	// OriginalOffsetExpr is the actual offset expression that was set in the query.
+	OriginalOffsetExpr *DurationExpr
 	// Offset is the offset used during the query execution
 	// which is calculated using the original offset, at modifier time,
 	// eval time, and subquery offsets in the AST tree.
@@ -208,6 +222,10 @@ type VectorSelector struct {
 	UnexpandedSeriesSet storage.SeriesSet
 	Series              []storage.Series
 
+	// BypassEmptyMatcherCheck is true when the VectorSelector isn't required to have at least one matcher not matching the empty string.
+	// This is the case when VectorSelector is used to represent the info function's second argument.
+	BypassEmptyMatcherCheck bool
+
 	PosRange posrange.PositionRange
 }
 
@@ -241,6 +259,7 @@ func (e *BinaryExpr) Type() ValueType { return ValueTypeVector }
 func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() }
+func (e *DurationExpr) Type() ValueType { return ValueTypeScalar }
 
 func (*AggregateExpr) PromQLExpr()  {}
 func (*BinaryExpr) PromQLExpr()     {}
@@ -253,6 +272,7 @@ func (*StringLiteral) PromQLExpr()  {}
 func (*UnaryExpr) PromQLExpr()      {}
 func (*VectorSelector) PromQLExpr() {}
 func (*StepInvariantExpr) PromQLExpr() {}
+func (*DurationExpr) PromQLExpr() {}
 
 // VectorMatchCardinality describes the cardinality relationship
 // of two Vectors in a binary operation.
@@ -352,8 +372,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
 // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
 // for all the non-nil children of node, recursively.
 func Inspect(node Node, f inspector) {
-	//nolint: errcheck
-	Walk(f, node, nil)
+	Walk(f, node, nil) //nolint:errcheck
 }
 
 // Children returns a list of all child nodes of a syntax tree node.
@@ -419,7 +438,7 @@ func mergeRanges(first, last Node) posrange.PositionRange {
 	}
 }
 
-// Item implements the Node interface.
+// PositionRange implements the Node interface.
 // This makes it possible to call mergeRanges on them.
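+// An Item's range runs from its position to that position plus the length of its value.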
 func (i *Item) PositionRange() posrange.PositionRange {
 	return posrange.PositionRange{
@@ -436,6 +455,23 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange {
 	return mergeRanges(e.LHS, e.RHS)
 }
 
+func (e *DurationExpr) PositionRange() posrange.PositionRange {
+	if e.Op == STEP {
+		return posrange.PositionRange{
+			Start: e.StartPos,
+			End:   e.EndPos,
+		}
+	}
+	if e.LHS == nil {
+		// Unary duration expression: the range starts at the operator.
+		return posrange.PositionRange{
+			Start: e.StartPos,
+			End:   e.RHS.PositionRange().End,
+		}
+	}
+	return mergeRanges(e.LHS, e.RHS)
+}
+
 func (e *Call) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
diff --git a/promql/parser/functions.go b/promql/parser/functions.go
index 99b41321fe..dfb181833f 100644
--- a/promql/parser/functions.go
+++ b/promql/parser/functions.go
@@ -202,10 +202,11 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
-	"holt_winters": {
-		Name:       "holt_winters",
-		ArgTypes:   []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
-		ReturnType: ValueTypeVector,
+	"double_exponential_smoothing": {
+		Name:         "double_exponential_smoothing",
+		ArgTypes:     []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
 	},
 	"hour": {
 		Name:       "hour",
@@ -223,6 +224,13 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"info": {
+		Name:         "info",
+		ArgTypes:     []ValueType{ValueTypeVector, ValueTypeVector},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+		Variadic:     1,
+	},
 	"irate": {
 		Name:       "irate",
 		ArgTypes:   []ValueType{ValueTypeMatrix},
@@ -275,6 +283,24 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"ts_of_max_over_time": {
+		Name:         "ts_of_max_over_time",
+		ArgTypes:     []ValueType{ValueTypeMatrix},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+	},
+	"ts_of_min_over_time": {
+		Name:         "ts_of_min_over_time",
+		ArgTypes:     []ValueType{ValueTypeMatrix},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+	},
+	"ts_of_last_over_time": {
+		Name:         "ts_of_last_over_time",
+		ArgTypes:     []ValueType{ValueTypeMatrix},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+	},
 	"minute": {
 		Name:       "minute",
 		ArgTypes:   []ValueType{ValueTypeVector},
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index b99e67424f..e7e16cd033 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/promql/parser/posrange"
+
+	"github.com/prometheus/common/model"
 )
 %}
 
@@ -84,6 +86,7 @@ NEGATIVE_BUCKETS_DESC
 ZERO_BUCKET_DESC
 ZERO_BUCKET_WIDTH_DESC
 CUSTOM_VALUES_DESC
+COUNTER_RESET_HINT_DESC
 %token histogramDescEnd
 
 // Operators.
@@ -147,8 +150,17 @@ WITHOUT
 %token <item>
 START
 END
+STEP
 %token preprocessorEnd
+
+// Counter reset hints.
+%token counterResetHintsStart
+%token <item>
+UNKNOWN_COUNTER_RESET
+COUNTER_RESET
+NOT_COUNTER_RESET
+GAUGE_TYPE
+%token counterResetHintsEnd
 
 // Start symbols for the generated parser.
 %token startSymbolsStart
@@ -163,7 +175,7 @@ START_METRIC_SELECTOR
 
 // Type definitions for grammar rules.
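// Each %type declaration binds the listed nonterminals to the named field of
// the parser's semantic value union.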
 %type <matchers> label_match_list
 %type <matcher> label_matcher
 
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint min_max
 
 %type <labels> label_set metric
 %type <lblList> label_set_list
 %type