diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 3d56ff2b22..89b2f4d0b6 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -11,6 +11,7 @@ updates: go.opentelemetry.io: patterns: - "go.opentelemetry.io/*" + open-pull-requests-limit: 20 - package-ecosystem: "gomod" directory: "/documentation/examples/remote_storage" schedule: @@ -19,6 +20,7 @@ updates: directory: "/web/ui" schedule: interval: "monthly" + open-pull-requests-limit: 20 - package-ecosystem: "github-actions" directory: "/" schedule: diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 04e5cea792..acf91ea12a 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,14 +12,14 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2 with: github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0 + - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 with: input: 'prompb' - - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2 + - uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4 with: input: 'prompb' against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD,subdir=prompb' diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index db240c9f80..f52d20785f 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,14 +12,14 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2 with: github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0 + - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 with: input: 'prompb' - - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2 + - uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4 with: input: 'prompb' against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 714faf167d..8b3624383c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: # should also be updated. 
image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/setup_environment - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 @@ -27,7 +27,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... @@ -43,7 +43,7 @@ jobs: # The go version in this image should be N-1 wrt test_go. image: quay.io/prometheus/golang-builder:1.21-base steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - run: make build # Don't run NPM build; don't run race-detector. - run: make test GO_ONLY=1 test-flags="" @@ -57,7 +57,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/setup_environment with: @@ -74,8 +74,8 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version: 1.22.x - run: | @@ -91,7 +91,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - run: go install ./cmd/promtool/. - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest @@ -114,7 +114,7 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/build with: @@ -137,32 +137,44 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/build with: parallelism: 12 thread: ${{ matrix.thread }} + check_generated_parser: + name: Check generated parser + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - name: Install Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + with: + cache: false + go-version: 1.22.x + - name: Run goyacc and check for diff + run: make install-goyacc check-generated-parser golangci: name: golangci-lint runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: - cache: false go-version: 1.22.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. - version: v1.56.2 + version: v1.59.1 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' @@ -175,7 +187,7 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/publish_main with: @@ -189,7 +201,7 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - uses: ./.github/promci/actions/publish_release with: @@ -204,7 +216,7 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 - name: Install nodejs uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 836fb25682..1ea1f5efae 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,15 +24,15 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + uses: 
actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Initialize CodeQL - uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index d0368eaa1c..8ddbc34aeb 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -4,6 +4,7 @@ on: push: paths: - "README.md" + - "README-containers.md" - ".github/workflows/container_description.yml" branches: [ main, master ] @@ -17,7 +18,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Set docker hub repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to Dockerhub @@ -29,7 +30,9 @@ jobs: destination_container_repo: ${{ env.DOCKER_REPO_NAME }} provider: dockerhub short_description: ${{ env.DOCKER_REPO_NAME }} - readme_file: 'README.md' + # Empty string results in README-containers.md being pushed if it + # exists. Otherwise, README.md is pushed. + readme_file: '' PushQuayIoReadme: runs-on: ubuntu-latest @@ -37,7 +40,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Set quay.io org name run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - name: Set quay.io repo name @@ -49,4 +52,6 @@ jobs: with: destination_container_repo: ${{ env.DOCKER_REPO_NAME }} provider: quay - readme_file: 'README.md' + # Empty string results in README-containers.md being pushed if it + # exists. Otherwise, README.md is pushed. 
+ readme_file: '' diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 4c19563eb4..dc510e5966 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -21,7 +21,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 3458d7b119..537e9abd84 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -13,7 +13,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - run: ./scripts/sync_repo_files.sh env: GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 51ff643ab2..078084888a 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,12 +21,12 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # tag=v4.1.2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # tag=v2.3.1 + uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3 with: results_file: results.sarif results_format: sarif @@ -37,7 +37,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # tag=v4.3.1 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3 with: name: SARIF file path: results.sarif @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # tag=v3.22.12 + uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8 with: sarif_file: results.sarif diff --git a/.gitpod.Dockerfile b/.gitpod.Dockerfile index d645db5de1..2370ec5f5c 100644 --- a/.gitpod.Dockerfile +++ b/.gitpod.Dockerfile @@ -1,15 +1,33 @@ FROM gitpod/workspace-full +# Set Node.js version as an environment variable. ENV CUSTOM_NODE_VERSION=16 -ENV CUSTOM_GO_VERSION=1.19 -ENV GOPATH=$HOME/go-packages -ENV GOROOT=$HOME/go -ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH +# Install and use the specified Node.js version via nvm. RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}" +# Ensure nvm uses the default Node.js version in all new shells. RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix -RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \ - && printf '%s\n' 'export GOPATH=/workspace/go' \ - 'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go +# Remove any existing Go installation in $HOME path. +RUN rm -rf $HOME/go $HOME/go-packages + +# Export go environment variables. 
+RUN echo "export GOPATH=/workspace/go" >> ~/.bashrc.d/300-go && \ + echo "export GOBIN=\$GOPATH/bin" >> ~/.bashrc.d/300-go && \ + echo "export GOROOT=${HOME}/go" >> ~/.bashrc.d/300-go && \ + echo "export PATH=\$GOROOT/bin:\$GOBIN:\$PATH" >> ~/.bashrc + +# Reload the environment variables to ensure go environment variables are +# available in subsequent commands. +RUN bash -c "source ~/.bashrc && source ~/.bashrc.d/300-go" + +# Fetch the Go version dynamically from the Prometheus go.mod file and Install Go in $HOME path. +RUN export CUSTOM_GO_VERSION=$(curl -sSL "https://raw.githubusercontent.com/prometheus/prometheus/main/go.mod" | awk '/^go/{print $2".0"}') && \ + curl -fsSL "https://dl.google.com/go/go${CUSTOM_GO_VERSION}.linux-amd64.tar.gz" | \ + tar -xz -C $HOME + +# Fetch the goyacc parser version dynamically from the Prometheus Makefile +# and install it globally in $GOBIN path. +RUN GOYACC_VERSION=$(curl -fsSL "https://raw.githubusercontent.com/prometheus/prometheus/main/Makefile" | awk -F'=' '/GOYACC_VERSION \?=/{gsub(/ /, "", $2); print $2}') && \ + go install "golang.org/x/tools/cmd/goyacc@${GOYACC_VERSION}" diff --git a/.golangci.yml b/.golangci.yml index a85a76cdf1..026d68a313 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -21,6 +21,7 @@ linters: - goimports - misspell - nolintlint + - perfsprint - predeclared - revive - testifylint @@ -28,6 +29,7 @@ linters: - unused - usestdlibvars - whitespace + - loggercheck issues: max-same-issues: 0 @@ -44,7 +46,9 @@ issues: - linters: - godot source: "^// ===" - + - linters: + - perfsprint + text: "fmt.Sprintf can be replaced with string concatenation" linters-settings: depguard: rules: @@ -85,6 +89,9 @@ linters-settings: local-prefixes: github.com/prometheus/prometheus gofumpt: extra-rules: true + perfsprint: + # Optimizes `fmt.Errorf`. + errorf: false revive: # By default, revive will enable only the linting rules that are named in the configuration file. # So, it's needed to explicitly set in configuration all required rules. diff --git a/CHANGELOG.md b/CHANGELOG.md index 0afd8d7026..7e47999934 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,56 @@ ## unreleased +## 2.53.0 / 2024-06-16 + +This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75. + +* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048 +* [CHANGE] Runtime: Change GOGC threshold from 100 to 75 #14176 #14285 +* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273 +* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974 +* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991 +* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620 +* [BUGFIX] OTLP: Don't generate target_info unless there are metrics and at least one identifying label is defined. #13991 +* [BUGFIX] Scrape: Do no try to ingest native histograms when the native histograms feature is turned off. 
This happened when protobuf scrape was enabled by for example the created time feature. #13987 +* [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941 +* [BUGFIX] Query logger: Do not leak file descriptors on error. #13948 +* [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199 +* [BUGFIX] API: Do not warn if result count is equal to the limit, only when exceeding the limit for the series, label-names and label-values APIs. #14116 +* [BUGFIX] TSDB: Fix head stats and hooks when replaying a corrupted snapshot. #14079 + +## 2.52.1 / 2024-05-29 + +* [BUGFIX] Linode SD: Fix partial fetch when discovery would return more than 500 elements. #14141 + +## 2.52.0 / 2024-05-07 + * [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633 +* [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554 +* [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935 +* [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099 +* [FEATURE] Alerting: Support native histogram templating. #13731 +* [FEATURE] Linode SD: Support IPv6 range discovery and region filtering. #13774 +* [ENHANCEMENT] PromQL: Performance improvements for queries with regex matchers. #13461 +* [ENHANCEMENT] PromQL: Performance improvements when using aggregation operators. #13744 +* [ENHANCEMENT] PromQL: Validate label_join destination label. #13803 +* [ENHANCEMENT] Scrape: Increment `prometheus_target_scrapes_sample_duplicate_timestamp_total` metric on duplicated series during one scrape. #12933 +* [ENHANCEMENT] TSDB: Many improvements in performance. #13742 #13673 #13782 +* [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754 +* [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772 +* [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823 +* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838 +* [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846 +* [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667 +* [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852 +* [BUGFIX] PromQL: Fix possible duplicated label name and values in a metric result for specific queries. #13845 +* [BUGFIX] Scrape: Fix setting native histogram schema factor during scrape. #13846 +* [BUGFIX] TSDB: Fix counting of histogram samples when creating WAL checkpoint stats. #13776 +* [BUGFIX] TSDB: Fix cases of compacting empty heads. #13755 +* [BUGFIX] TSDB: Count float histograms in WAL checkpoint. #13844 +* [BUGFIX] Remote Read: Fix memory leak due to broken requests. #13777 +* [BUGFIX] API: Stop building response for `/api/v1/series/` when the API request was cancelled. #13766 +* [BUGFIX] promtool: Fix panic on `promtool tsdb analyze --extended` when no native histograms are present. 
#13976 ## 2.51.2 / 2024-04-09 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7687826ba4..9b1b286ccf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -42,7 +42,12 @@ go build ./cmd/prometheus/ make test # Make sure all the tests pass before you commit and push :) ``` -We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. +To run a collection of Go linters through [`golangci-lint`](https://github.com/golangci/golangci-lint), do: +```bash +make lint +``` + +If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. See [this section of the golangci-lint documentation](https://golangci-lint.run/usage/false-positives/#nolint-directive) for more information. All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions). diff --git a/MAINTAINERS.md b/MAINTAINERS.md index d9a7d0f783..3661ddaa0a 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -16,7 +16,7 @@ Maintainers for specific parts of the codebase: * `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), George Krajcsovits ( / @krajorama) * `storage` - * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie ( / @tomwilkie) + * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank) * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) * `agent`: Robert Fratto ( / @rfratto) diff --git a/Makefile b/Makefile index b8dff34884..ea66309b50 100644 --- a/Makefile +++ b/Makefile @@ -24,6 +24,7 @@ TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout GOLANGCI_LINT_OPTS ?= --timeout 4m +GOYACC_VERSION ?= v0.6.0 include Makefile.common @@ -86,24 +87,42 @@ assets-tarball: assets @echo '>> packaging assets' scripts/package_assets.sh -# We only want to generate the parser when there's changes to the grammar. .PHONY: parser parser: @echo ">> running goyacc to generate the .go file." ifeq (, $(shell command -v goyacc 2> /dev/null)) @echo "goyacc not installed so skipping" - @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0" + @echo "To install: \"go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)\" or run \"make install-goyacc\"" else - goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y + $(MAKE) promql/parser/generated_parser.y.go endif +promql/parser/generated_parser.y.go: promql/parser/generated_parser.y + @echo ">> running goyacc to generate the .go file." 
+ @$(FIRST_GOPATH)/bin/goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y + +.PHONY: clean-parser +clean-parser: + @echo ">> cleaning generated parser" + @rm -f promql/parser/generated_parser.y.go + +.PHONY: check-generated-parser +check-generated-parser: clean-parser promql/parser/generated_parser.y.go + @echo ">> checking generated parser" + @git diff --exit-code -- promql/parser/generated_parser.y.go || (echo "Generated parser is out of date. Please run 'make parser' and commit the changes." && false) + +.PHONY: install-goyacc +install-goyacc: + @echo ">> installing goyacc $(GOYACC_VERSION)" + @go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION) + .PHONY: test # If we only want to only test go code we have to change the test target # which is called by all. ifeq ($(GO_ONLY),1) test: common-test check-go-mod-version else -test: common-test ui-build-module ui-test ui-lint check-go-mod-version +test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version endif .PHONY: npm_licenses diff --git a/Makefile.common b/Makefile.common index 0acfb9d806..e3da72ab47 100644 --- a/Makefile.common +++ b/Makefile.common @@ -55,13 +55,13 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),) endif endif -PROMU_VERSION ?= 0.15.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.56.2 +GOLANGCI_LINT_VERSION ?= v1.59.1 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/README.md b/README.md index 023619a781..cd14ed2ecb 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@

[README.md header hunk not recoverable: the HTML markup of the centered "Prometheus" logo/heading block and of the paragraph "Visit prometheus.io for the full documentation, examples and guides." was stripped during extraction; the hunk's -/+ lines differ only in markup or link attributes that are lost here.]

diff --git a/RELEASE.md b/RELEASE.md index f313c4172d..0d3f7456cd 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -57,7 +57,8 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) | | v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) | | v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) | -| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) | +| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) | +| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. @@ -149,6 +150,8 @@ Changes for a patch release or release candidate should be merged into the previ Bump the version in the `VERSION` file and update `CHANGELOG.md`. Do this in a proper PR pointing to the release branch as this gives others the opportunity to chime in on the release in general and on the addition to the changelog in particular. For a release candidate, append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.). +When updating the `CHANGELOG.md` look at all PRs included in the release since the last release and verify if they need a changelog entry. + Note that `CHANGELOG.md` should only document changes relevant to users of Prometheus, including external API changes, performance improvements, and new features. Do not document changes of internal interfaces, code refactorings and clean-ups, changes to the build process, etc. People interested in these are asked to refer to the git history. For release candidates still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update. diff --git a/VERSION b/VERSION index 587b583f91..261d95596f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.51.2 +2.53.0 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index ed62926cc6..b4737e6287 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -28,6 +28,8 @@ import ( "os/signal" "path/filepath" "runtime" + "runtime/debug" + "strconv" "strings" "sync" "syscall" @@ -42,6 +44,7 @@ import ( "github.com/mwitkow/go-conntrack" "github.com/oklog/run" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/common/model" "github.com/prometheus/common/promlog" @@ -191,6 +194,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "extra-scrape-metrics": c.scrape.ExtraMetrics = true level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled") + case "metadata-wal-records": + c.scrape.AppendMetadata = true + level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0") case "new-service-discovery-manager": c.enableNewSDManager = true level.Info(logger).Log("msg", "Experimental service discovery manager") @@ -217,6 +223,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") case "native-histograms": c.tsdb.EnableNativeHistograms = true + c.scrape.EnableNativeHistogramsIngestion = true // Change relevant global variables. 
Hacky, but it's hard to pass a new option or default to unmarshallers. config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols @@ -254,6 +261,18 @@ func main() { newFlagRetentionDuration model.Duration ) + // Unregister the default GoCollector, and reregister with our defaults. + if prometheus.Unregister(collectors.NewGoCollector()) { + prometheus.MustRegister( + collectors.NewGoCollector( + collectors.WithGoCollectorRuntimeMetrics( + collectors.MetricsGC, + collectors.MetricsScheduler, + ), + ), + ) + } + cfg := flagConfig{ notifier: notifier.Options{ Registerer: prometheus.DefaultRegisterer, @@ -309,9 +328,15 @@ func main() { a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions."). Default("false").BoolVar(&cfg.web.EnableAdminAPI) + // TODO(bwplotka): Consider allowing those remote receive flags to be changed in config. + // See https://github.com/prometheus/prometheus/issues/14410 a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests."). Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver) + supportedRemoteWriteProtoMsgs := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} + a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())). + Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs)) + a.Flag("web.console.templates", "Path to the console template directory, available at /consoles."). Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath) @@ -420,7 +445,7 @@ func main() { serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager."). Default("1m").SetValue(&cfg.resendDelay) - serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently."). + serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly."). Default("4").Int64Var(&cfg.maxConcurrentEvals) a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release."). @@ -432,6 +457,9 @@ func main() { serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications."). Default("10000").IntVar(&cfg.notifier.QueueCapacity) + serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down."). + Default("true").BoolVar(&cfg.notifier.DrainOnShutdown) + // TODO: Remove in Prometheus 3.0. 
alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String() @@ -630,7 +658,7 @@ func main() { var ( localStorage = &readyStorage{stats: tsdb.NewDBStats()} scraper = &readyScrapeManager{} - remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper) + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) @@ -774,6 +802,9 @@ func main() { ResendDelay: time.Duration(cfg.resendDelay), MaxConcurrentEvals: cfg.maxConcurrentEvals, ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval, + DefaultRuleQueryOffset: func() time.Duration { + return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset) + }, }) } @@ -1181,7 +1212,7 @@ func main() { } if agentMode { // WAL storage. - opts := cfg.agent.ToAgentOptions() + opts := cfg.agent.ToAgentOptions(cfg.tsdb.OutOfOrderTimeWindow) cancel := make(chan struct{}) g.Add( func() error { @@ -1217,6 +1248,7 @@ func main() { "TruncateFrequency", cfg.agent.TruncateFrequency, "MinWALTime", cfg.agent.MinWALTime, "MaxWALTime", cfg.agent.MaxWALTime, + "OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow, ) localStorage.Set(db, 0) @@ -1370,6 +1402,17 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) } + oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC) + if oldGoGC != conf.Runtime.GoGC { + level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) + } + // Write the new setting out to the ENV var for runtime API output. + if conf.Runtime.GoGC >= 0 { + os.Setenv("GOGC", strconv.Itoa(conf.Runtime.GoGC)) + } else { + os.Setenv("GOGC", "off") + } + noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)} level.Info(logger).Log(append(l, timings...)...) 
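The reload path above relies on two properties of the Go runtime API that are easy to miss: `debug.SetGCPercent` returns the *previous* setting (which is what makes the old/new comparison work), and a negative value is the programmatic equivalent of `GOGC=off`. A minimal standalone sketch of the same pattern follows; `applyGoGC` is a hypothetical name, not Prometheus code:

```go
// Hypothetical stand-in for the reloadConfig GOGC handling shown above.
package main

import (
	"fmt"
	"os"
	"runtime/debug"
	"strconv"
)

func applyGoGC(goGC int) {
	// SetGCPercent returns the previous value, so changes can be logged.
	if old := debug.SetGCPercent(goGC); old != goGC {
		fmt.Printf("updated GOGC old=%d new=%d\n", old, goGC)
	}
	// Mirror the live value into the env var for runtime introspection;
	// a negative setting corresponds to GOGC=off.
	if goGC >= 0 {
		os.Setenv("GOGC", strconv.Itoa(goGC))
	} else {
		os.Setenv("GOGC", "off")
	}
}

func main() {
	applyGoGC(75) // the new default this release ships with
}
```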
@@ -1709,17 +1752,22 @@ type agentOptions struct { TruncateFrequency model.Duration MinWALTime, MaxWALTime model.Duration NoLockfile bool + OutOfOrderTimeWindow int64 } -func (opts agentOptions) ToAgentOptions() agent.Options { +func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options { + if outOfOrderTimeWindow < 0 { + outOfOrderTimeWindow = 0 + } return agent.Options{ - WALSegmentSize: int(opts.WALSegmentSize), - WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), - StripeSize: opts.StripeSize, - TruncateFrequency: time.Duration(opts.TruncateFrequency), - MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), - MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)), - NoLockfile: opts.NoLockfile, + WALSegmentSize: int(opts.WALSegmentSize), + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), + StripeSize: opts.StripeSize, + TruncateFrequency: time.Duration(opts.TruncateFrequency), + MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), + MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)), + NoLockfile: opts.NoLockfile, + OutOfOrderTimeWindow: outOfOrderTimeWindow, } } @@ -1731,3 +1779,39 @@ type discoveryManager interface { Run() error SyncCh() <-chan map[string][]*targetgroup.Group } + +// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum. +type rwProtoMsgFlagParser struct { + msgs *[]config.RemoteWriteProtoMsg +} + +func rwProtoMsgFlagValue(msgs *[]config.RemoteWriteProtoMsg) kingpin.Value { + return &rwProtoMsgFlagParser{msgs: msgs} +} + +// IsCumulative is used by kingpin to tell if it's an array or not. +func (p *rwProtoMsgFlagParser) IsCumulative() bool { + return true +} + +func (p *rwProtoMsgFlagParser) String() string { + ss := make([]string, 0, len(*p.msgs)) + for _, t := range *p.msgs { + ss = append(ss, string(t)) + } + return strings.Join(ss, ",") +} + +func (p *rwProtoMsgFlagParser) Set(opt string) error { + t := config.RemoteWriteProtoMsg(opt) + if err := t.Validate(); err != nil { + return err + } + for _, prev := range *p.msgs { + if prev == t { + return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs) + } + } + *p.msgs = append(*p.msgs, t) + return nil +} diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 03f3a9bc39..c827812e60 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -24,16 +24,19 @@ import ( "os/exec" "path/filepath" "runtime" + "strconv" "strings" "syscall" "testing" "time" + "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/rules" @@ -189,7 +192,7 @@ func TestSendAlerts(t *testing.T) { for i, tc := range testCases { tc := tc - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { senderFunc := senderFunc(func(alerts ...*notifier.Alert) { require.NotEmpty(t, tc.in, "sender called with 0 alert") require.Equal(t, tc.exp, alerts) @@ -498,3 +501,65 @@ func TestDocumentation(t *testing.T) { require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. 
Hint: run `make cli-documentation`.") } + +func TestRwProtoMsgFlagParser(t *testing.T) { + defaultOpts := config.RemoteWriteProtoMsgs{ + config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2, + } + + for _, tcase := range []struct { + args []string + expected []config.RemoteWriteProtoMsg + expectedErr error + }{ + { + args: nil, + expected: defaultOpts, + }, + { + args: []string{"--test-proto-msgs", "test"}, + expectedErr: errors.New("unknown remote write protobuf message test, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request"), + }, + { + args: []string{"--test-proto-msgs", "io.prometheus.write.v2.Request"}, + expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2}, + }, + { + args: []string{ + "--test-proto-msgs", "io.prometheus.write.v2.Request", + "--test-proto-msgs", "io.prometheus.write.v2.Request", + }, + expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request] already"), + }, + { + args: []string{ + "--test-proto-msgs", "io.prometheus.write.v2.Request", + "--test-proto-msgs", "prometheus.WriteRequest", + }, + expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2, config.RemoteWriteProtoMsgV1}, + }, + { + args: []string{ + "--test-proto-msgs", "io.prometheus.write.v2.Request", + "--test-proto-msgs", "prometheus.WriteRequest", + "--test-proto-msgs", "io.prometheus.write.v2.Request", + }, + expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request prometheus.WriteRequest] already"), + }, + } { + t.Run(strings.Join(tcase.args, ","), func(t *testing.T) { + a := kingpin.New("test", "") + var opt []config.RemoteWriteProtoMsg + a.Flag("test-proto-msgs", "").Default(defaultOpts.Strings()...).SetValue(rwProtoMsgFlagValue(&opt)) + + _, err := a.Parse(tcase.args) + if tcase.expectedErr != nil { + require.Error(t, err) + require.Equal(t, tcase.expectedErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tcase.expected, opt) + } + }) + } +} diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index dd6b56672c..62e317bf8b 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -72,7 +72,7 @@ func (p *queryLogTest) waitForPrometheus() error { var err error for x := 0; x < 20; x++ { var r *http.Response - if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == 200 { + if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK { break } time.Sleep(500 * time.Millisecond) @@ -296,7 +296,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Equal(t, 1, qc) } else { - require.Greater(t, qc, 0, "no queries logged") + require.Positive(t, qc, "no queries logged") } p.validateLastQuery(t, ql) @@ -366,7 +366,7 @@ func (p *queryLogTest) run(t *testing.T) { if p.exactQueryCount() { require.Equal(t, 1, qc) } else { - require.Greater(t, qc, 0, "no queries logged") + require.Positive(t, qc, "no queries logged") } } diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 601c3ced9f..400cae421a 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/oklog/ulid" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" @@ -88,7 +89,7 @@ func createBlocks(input []byte, mint, maxt, 
maxBlockDuration int64, maxSamplesIn blockDuration := getCompatibleBlockDuration(maxBlockDuration) mint = blockDuration * (mint / blockDuration) - db, err := tsdb.OpenDBReadOnly(outputDir, nil) + db, err := tsdb.OpenDBReadOnly(outputDir, "", nil) if err != nil { return err } @@ -191,6 +192,10 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn if quiet { break } + // Empty block, don't print. + if block.Compare(ulid.ULID{}) == 0 { + break + } blocks, err := db.Blocks() if err != nil { return fmt.Errorf("get blocks: %w", err) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index a62ae4fbf4..e1d275e97e 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -56,8 +56,8 @@ import ( "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/notifier" _ "github.com/prometheus/prometheus/plugins" // Register plugins. - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/util/documentcli" ) @@ -235,12 +235,14 @@ func main() { tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.") dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() + dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() - tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.") + tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.") dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() + dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. 
Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() @@ -377,7 +379,7 @@ func main() { case testRulesCmd.FullCommand(): os.Exit(RulesUnitTest( - promql.LazyLoaderOpts{ + promqltest.LazyLoaderOpts{ EnableAtModifier: true, EnableNegativeOffset: true, }, @@ -396,9 +398,9 @@ func main() { os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable))) case tsdbDumpCmd.FullCommand(): - os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet))) + os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet))) case tsdbDumpOpenMetricsCmd.FullCommand(): - os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics))) + os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics))) // TODO(aSquare14): Work on adding support for custom block size. case openMetricsImportCmd.FullCommand(): os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 7306a3e648..78500fe937 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -25,6 +25,7 @@ import ( "os/exec" "path/filepath" "runtime" + "strconv" "strings" "syscall" "testing" @@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) { } { t.Run(c.file, func(t *testing.T) { for _, lintFatal := range []bool{true, false} { - t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) { + t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) { args := []string{"-test.main", "check", "config", "testdata/" + c.file} if lintFatal { args = append(args, "--lint-fatal") diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index b786c92976..2ed7244b1c 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) { } func listBlocks(path string, humanReadable bool) error { - db, err := tsdb.OpenDBReadOnly(path, nil) + db, err := tsdb.OpenDBReadOnly(path, "", nil) if err != nil { return err } @@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string { } func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) { - db, err := tsdb.OpenDBReadOnly(path, nil) + db, err := tsdb.OpenDBReadOnly(path, "", nil) if err != nil { return nil, nil, err } @@ -708,8 +708,8 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb. 
type SeriesSetFormatter func(series storage.SeriesSet) error -func dumpSamples(ctx context.Context, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) { - db, err := tsdb.OpenDBReadOnly(path, nil) +func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) { + db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil) if err != nil { return err } @@ -838,6 +838,10 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB } func displayHistogram(dataType string, datas []int, total int) { + if len(datas) == 0 { + fmt.Printf("%s: N/A\n\n", dataType) + return + } slices.Sort(datas) start, end, step := generateBucket(datas[0], datas[len(datas)-1]) sum := 0 @@ -852,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) { } avg := sum / len(datas) fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1]) - maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end))) - maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step))) - maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount))) + maxLeftLen := strconv.Itoa(len(strconv.Itoa(end))) + maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step))) + maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount))) for bucket, count := range buckets { percentage := 100.0 * count / total fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage)) diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go index 36a65d73e4..75089b168b 100644 --- a/cmd/promtool/tsdb_test.go +++ b/cmd/promtool/tsdb_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/tsdb" ) @@ -64,6 +64,7 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin err := dumpSamples( context.Background(), path, + t.TempDir(), mint, maxt, match, @@ -88,7 +89,7 @@ func normalizeNewLine(b []byte) []byte { } func TestTSDBDump(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m metric{foo="bar", baz="abc"} 1 2 3 4 5 heavy_metric{foo="bar"} 5 4 3 2 1 @@ -158,7 +159,7 @@ func TestTSDBDump(t *testing.T) { } func TestTSDBDumpOpenMetrics(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m my_counter{foo="bar", baz="abc"} 1 2 3 4 5 my_gauge{bar="foo", abc="baz"} 9 8 0 4 7 diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 6d6683a934..5451c5296c 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -36,13 +36,14 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" ) // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. 
-func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { +func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { failed := false var run *regexp.Regexp @@ -69,7 +70,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFla return successExitCode } -func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error { +func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error { fmt.Println("Unit Testing: ", filename) b, err := os.ReadFile(filename) @@ -175,9 +176,9 @@ type testGroup struct { } // test performs the unit tests. -func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) { +func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) { // Setup testing suite. - suite, err := promql.NewLazyLoader(tg.seriesLoadingString(), queryOpts) + suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts) if err != nil { return []error{err} } @@ -413,7 +414,7 @@ Outer: gotSamples = append(gotSamples, parsedSample{ Labels: s.Metric.Copy(), Value: s.F, - Histogram: promql.HistogramTestExpression(s.H), + Histogram: promqltest.HistogramTestExpression(s.H), }) } @@ -443,7 +444,7 @@ Outer: expSamples = append(expSamples, parsedSample{ Labels: lb, Value: s.Value, - Histogram: promql.HistogramTestExpression(hist), + Histogram: promqltest.HistogramTestExpression(hist), }) } @@ -572,7 +573,7 @@ func (la labelsAndAnnotations) String() string { } s := "[\n0:" + indentLines("\n"+la[0].String(), " ") for i, l := range la[1:] { - s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ") + s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ") } s += "\n]" diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 971ddb40c5..2dbd5a4e51 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/promqltest" ) func TestRulesUnitTest(t *testing.T) { @@ -28,7 +28,7 @@ func TestRulesUnitTest(t *testing.T) { tests := []struct { name string args args - queryOpts promql.LazyLoaderOpts + queryOpts promqltest.LazyLoaderOpts want int }{ { @@ -92,7 +92,7 @@ func TestRulesUnitTest(t *testing.T) { args: args{ files: []string{"./testdata/at-modifier-test.yml"}, }, - queryOpts: promql.LazyLoaderOpts{ + queryOpts: promqltest.LazyLoaderOpts{ EnableAtModifier: true, }, want: 0, @@ -109,7 +109,7 @@ func TestRulesUnitTest(t *testing.T) { args: args{ files: []string{"./testdata/negative-offset-test.yml"}, }, - queryOpts: promql.LazyLoaderOpts{ + queryOpts: promqltest.LazyLoaderOpts{ EnableNegativeOffset: true, }, want: 0, @@ -119,7 +119,7 @@ func TestRulesUnitTest(t *testing.T) { args: args{ files: []string{"./testdata/no-test-group-interval.yml"}, }, - queryOpts: promql.LazyLoaderOpts{ + queryOpts: promqltest.LazyLoaderOpts{ EnableNegativeOffset: true, }, want: 0, @@ -142,7 +142,7 @@ func TestRulesUnitTestRun(t *testing.T) { tests := []struct { name string args args - queryOpts promql.LazyLoaderOpts + queryOpts 
promqltest.LazyLoaderOpts want int }{ { diff --git a/config/config.go b/config/config.go index 1cfd588643..c924e30989 100644 --- a/config/config.go +++ b/config/config.go @@ -20,6 +20,7 @@ import ( "os" "path/filepath" "sort" + "strconv" "strings" "time" @@ -145,11 +146,17 @@ var ( ScrapeInterval: model.Duration(1 * time.Minute), ScrapeTimeout: model.Duration(10 * time.Second), EvaluationInterval: model.Duration(1 * time.Minute), + RuleQueryOffset: model.Duration(0 * time.Minute), // When native histogram feature flag is enabled, ScrapeProtocols default // changes to DefaultNativeHistogramScrapeProtocols. ScrapeProtocols: DefaultScrapeProtocols, } + DefaultRuntimeConfig = RuntimeConfig{ + // Go runtime tuning. + GoGC: 75, + } + // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. @@ -173,6 +180,7 @@ var ( // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), + ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, HTTPClientConfig: config.DefaultHTTPClientConfig, @@ -224,6 +232,7 @@ var ( // Config is the top-level configuration for Prometheus's config files. type Config struct { GlobalConfig GlobalConfig `yaml:"global"` + Runtime RuntimeConfig `yaml:"runtime,omitempty"` AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` RuleFiles []string `yaml:"rule_files,omitempty"` ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"` @@ -271,7 +280,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { jobNames := map[string]string{} for i, scfg := range c.ScrapeConfigs { - // We do these checks for library users that would not call Validate in + // We do these checks for library users that would not call validate in // Unmarshal. if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, err @@ -334,6 +343,14 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { c.GlobalConfig = DefaultGlobalConfig } + // If a runtime block was open but empty the default runtime config is overwritten. + // We have to restore it here. + if c.Runtime.isZero() { + c.Runtime = DefaultRuntimeConfig + // Use the GOGC env var value if the runtime section is empty. + c.Runtime.GoGC = getGoGCEnv() + } + for _, rf := range c.RuleFiles { if !patRulePath.MatchString(rf) { return fmt.Errorf("invalid rule file path %q", rf) @@ -397,6 +414,8 @@ type GlobalConfig struct { ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` // How frequently to evaluate rules by default. EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` + // Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received. + RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"` // File to which PromQL queries are logged. QueryLogFile string `yaml:"query_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. @@ -556,10 +575,22 @@ func (c *GlobalConfig) isZero() bool { c.ScrapeInterval == 0 && c.ScrapeTimeout == 0 && c.EvaluationInterval == 0 && + c.RuleQueryOffset == 0 && c.QueryLogFile == "" && c.ScrapeProtocols == nil } +// RuntimeConfig configures the values for the process behavior. 
+type RuntimeConfig struct {
+	// The Go garbage collection target percentage.
+	GoGC int `yaml:"gogc,omitempty"`
+}
+
+// isZero returns true iff the runtime config is the zero value.
+func (c *RuntimeConfig) isZero() bool {
+	return c.GoGC == 0
+}
+
 type ScrapeConfigs struct {
 	ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
 }
@@ -1025,6 +1056,49 @@ func CheckTargetAddress(address model.LabelValue) error {
 	return nil
 }
 
+// RemoteWriteProtoMsg represents the known protobuf message for the remote write
+// 1.0 and 2.0 specs.
+type RemoteWriteProtoMsg string
+
+// Validate returns an error if the given reference for the protobuf message is not supported.
+func (s RemoteWriteProtoMsg) Validate() error {
+	switch s {
+	case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
+		return nil
+	default:
+		return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
+	}
+}
+
+type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
+
+func (m RemoteWriteProtoMsgs) Strings() []string {
+	ret := make([]string, 0, len(m))
+	for _, typ := range m {
+		ret = append(ret, string(typ))
+	}
+	return ret
+}
+
+func (m RemoteWriteProtoMsgs) String() string {
+	return strings.Join(m.Strings(), ", ")
+}
+
+var (
+	// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
+	// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+	//
+	// NOTE: This string is used for both HTTP header values and config value, so don't change
+	// this reference.
+	RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
+	// RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
+	// message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+	//
+	// NOTE: This string is used for both HTTP header values and config value, so don't change
+	// this reference.
+	RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
+)
+
 // RemoteWriteConfig is the configuration for writing to remote storage.
 type RemoteWriteConfig struct {
 	URL *config.URL `yaml:"url"`
@@ -1034,6 +1108,9 @@ type RemoteWriteConfig struct {
 	Name                 string `yaml:"name,omitempty"`
 	SendExemplars        bool   `yaml:"send_exemplars,omitempty"`
 	SendNativeHistograms bool   `yaml:"send_native_histograms,omitempty"`
+	// ProtobufMessage specifies the protobuf message to use against the remote
+	// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+	ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -1068,6 +1145,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		return err
 	}
 
+	if err := c.ProtobufMessage.Validate(); err != nil {
+		return fmt.Errorf("invalid protobuf_message value: %w", err)
+	}
+
 	// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
 	// We cannot make it a pointer as the parser panics for inlined pointer structs.
 	// Thus we just do its validation here.
@@ -1207,3 +1288,19 @@ func filePath(filename string) string {
 func fileErr(filename string, err error) error {
 	return fmt.Errorf("%q: %w", filePath(filename), err)
 }
+
+func getGoGCEnv() int {
+	goGCEnv := os.Getenv("GOGC")
+	// If the GOGC env var is set, use the same logic as upstream Go.
+ if goGCEnv != "" { + // Special case for GOGC=off. + if strings.ToLower(goGCEnv) == "off" { + return -1 + } + i, err := strconv.Atoi(goGCEnv) + if err == nil { + return i + } + } + return DefaultRuntimeConfig.GoGC +} diff --git a/config/config_default_test.go b/config/config_default_test.go index 26623590d9..31133f1e04 100644 --- a/config/config_default_test.go +++ b/config/config_default_test.go @@ -19,6 +19,7 @@ const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml" var ruleFilesExpectedConf = &Config{ GlobalConfig: DefaultGlobalConfig, + Runtime: DefaultRuntimeConfig, RuleFiles: []string{ "testdata/first.rules", "testdata/rules/second.rules", diff --git a/config/config_test.go b/config/config_test.go index 14981d25f0..3c4907a46c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -76,6 +76,7 @@ const ( globLabelLimit = 30 globLabelNameLengthLimit = 200 globLabelValueLengthLimit = 200 + globalGoGC = 42 ) var expectedConf = &Config{ @@ -96,6 +97,10 @@ var expectedConf = &Config{ ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, }, + Runtime: RuntimeConfig{ + GoGC: globalGoGC, + }, + RuleFiles: []string{ filepath.FromSlash("testdata/first.rules"), filepath.FromSlash("testdata/my/*.rules"), @@ -103,9 +108,10 @@ var expectedConf = &Config{ RemoteWriteConfigs: []*RemoteWriteConfig{ { - URL: mustParseURL("http://remote1/push"), - RemoteTimeout: model.Duration(30 * time.Second), - Name: "drop_expensive", + URL: mustParseURL("http://remote1/push"), + ProtobufMessage: RemoteWriteProtoMsgV1, + RemoteTimeout: model.Duration(30 * time.Second), + Name: "drop_expensive", WriteRelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"__name__"}, @@ -132,11 +138,12 @@ var expectedConf = &Config{ }, }, { - URL: mustParseURL("http://remote2/push"), - RemoteTimeout: model.Duration(30 * time.Second), - QueueConfig: DefaultQueueConfig, - MetadataConfig: DefaultMetadataConfig, - Name: "rw_tls", + URL: mustParseURL("http://remote2/push"), + ProtobufMessage: RemoteWriteProtoMsgV2, + RemoteTimeout: model.Duration(30 * time.Second), + QueueConfig: DefaultQueueConfig, + MetadataConfig: DefaultMetadataConfig, + Name: "rw_tls", HTTPClientConfig: config.HTTPClientConfig{ TLSConfig: config.TLSConfig{ CertFile: filepath.FromSlash("testdata/valid_cert_file"), @@ -993,6 +1000,7 @@ var expectedConf = &Config{ HostNetworkingHost: "localhost", RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, + MatchFirstNetwork: true, }, }, }, @@ -1794,6 +1802,10 @@ var expectedErrors = []struct { filename: "remote_write_authorization_header.bad.yml", errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`, }, + { + filename: "remote_write_wrong_msg.bad.yml", + errMsg: `invalid protobuf_message value: unknown remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`, + }, { filename: "remote_write_url_missing.bad.yml", errMsg: `url for remote_write is empty`, @@ -2081,6 +2093,7 @@ func TestEmptyGlobalBlock(t *testing.T) { c, err := Load("global:\n", false, log.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig + exp.Runtime = DefaultRuntimeConfig require.Equal(t, exp, *c) } diff --git a/config/config_windows_test.go b/config/config_windows_test.go index 7fd1d46f6d..db4d46ef13 100644 --- a/config/config_windows_test.go +++ b/config/config_windows_test.go @@ -17,6 +17,7 @@ const ruleFilesConfigFile = 
"testdata/rules_abs_path_windows.good.yml" var ruleFilesExpectedConf = &Config{ GlobalConfig: DefaultGlobalConfig, + Runtime: DefaultRuntimeConfig, RuleFiles: []string{ "testdata\\first.rules", "testdata\\rules\\second.rules", diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index b584301649..0e0aa2bd5d 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -14,6 +14,9 @@ global: monitor: codelab foo: bar +runtime: + gogc: 42 + rule_files: - "first.rules" - "my/*.rules" @@ -34,6 +37,7 @@ remote_write: key_file: valid_key_file - url: http://remote2/push + protobuf_message: io.prometheus.write.v2.Request name: rw_tls tls_config: cert_file: valid_cert_file diff --git a/config/testdata/remote_write_wrong_msg.bad.yml b/config/testdata/remote_write_wrong_msg.bad.yml new file mode 100644 index 0000000000..0918309540 --- /dev/null +++ b/config/testdata/remote_write_wrong_msg.bad.yml @@ -0,0 +1,3 @@ +remote_write: + - url: localhost:9090 + protobuf_message: io.prometheus.writet.v2.Request # typo in 'write" diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index aa79fd9c62..a44912481a 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net" + "strconv" "strings" "time" @@ -41,28 +42,29 @@ import ( ) const ( - ec2Label = model.MetaLabelPrefix + "ec2_" - ec2LabelAMI = ec2Label + "ami" - ec2LabelAZ = ec2Label + "availability_zone" - ec2LabelAZID = ec2Label + "availability_zone_id" - ec2LabelArch = ec2Label + "architecture" - ec2LabelIPv6Addresses = ec2Label + "ipv6_addresses" - ec2LabelInstanceID = ec2Label + "instance_id" - ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle" - ec2LabelInstanceState = ec2Label + "instance_state" - ec2LabelInstanceType = ec2Label + "instance_type" - ec2LabelOwnerID = ec2Label + "owner_id" - ec2LabelPlatform = ec2Label + "platform" - ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id" - ec2LabelPrivateDNS = ec2Label + "private_dns_name" - ec2LabelPrivateIP = ec2Label + "private_ip" - ec2LabelPublicDNS = ec2Label + "public_dns_name" - ec2LabelPublicIP = ec2Label + "public_ip" - ec2LabelRegion = ec2Label + "region" - ec2LabelSubnetID = ec2Label + "subnet_id" - ec2LabelTag = ec2Label + "tag_" - ec2LabelVPCID = ec2Label + "vpc_id" - ec2LabelSeparator = "," + ec2Label = model.MetaLabelPrefix + "ec2_" + ec2LabelAMI = ec2Label + "ami" + ec2LabelAZ = ec2Label + "availability_zone" + ec2LabelAZID = ec2Label + "availability_zone_id" + ec2LabelArch = ec2Label + "architecture" + ec2LabelIPv6Addresses = ec2Label + "ipv6_addresses" + ec2LabelInstanceID = ec2Label + "instance_id" + ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle" + ec2LabelInstanceState = ec2Label + "instance_state" + ec2LabelInstanceType = ec2Label + "instance_type" + ec2LabelOwnerID = ec2Label + "owner_id" + ec2LabelPlatform = ec2Label + "platform" + ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses" + ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id" + ec2LabelPrivateDNS = ec2Label + "private_dns_name" + ec2LabelPrivateIP = ec2Label + "private_ip" + ec2LabelPublicDNS = ec2Label + "public_dns_name" + ec2LabelPublicIP = ec2Label + "public_ip" + ec2LabelRegion = ec2Label + "region" + ec2LabelSubnetID = ec2Label + "subnet_id" + ec2LabelTag = ec2Label + "tag_" + ec2LabelVPCID = ec2Label + "vpc_id" + ec2LabelSeparator = "," ) // DefaultEC2SDConfig is the default EC2 SD configuration. 
@@ -279,7 +281,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 		if inst.PrivateDnsName != nil {
 			labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
 		}
-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
 		labels[model.AddressLabel] = model.LabelValue(addr)
 
 		if inst.Platform != nil {
@@ -316,6 +318,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 			var subnets []string
 			var ipv6addrs []string
+			var primaryipv6addrs []string
 			subnetsMap := make(map[string]struct{})
 			for _, eni := range inst.NetworkInterfaces {
 				if eni.SubnetId == nil {
@@ -329,6 +332,15 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 				for _, ipv6addr := range eni.Ipv6Addresses {
 					ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address)
+					if *ipv6addr.IsPrimaryIpv6 {
+						// We might have to extend the slice with more than one element.
+						// That can leave empty strings in the list, which is intentional:
+						// it preserves the position/device index information.
+						for int64(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex {
+							primaryipv6addrs = append(primaryipv6addrs, "")
+						}
+						primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address
+					}
 				}
 			}
 			labels[ec2LabelSubnetID] = model.LabelValue(
@@ -341,6 +353,12 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 					strings.Join(ipv6addrs, ec2LabelSeparator) +
 					ec2LabelSeparator)
 			}
+			if len(primaryipv6addrs) > 0 {
+				labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue(
+					ec2LabelSeparator +
+						strings.Join(primaryipv6addrs, ec2LabelSeparator) +
+						ec2LabelSeparator)
+			}
 		}
 
 		for _, t := range inst.Tags {
diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go
index 86b138be55..0ad7f2d541 100644
--- a/discovery/aws/lightsail.go
+++ b/discovery/aws/lightsail.go
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"strconv"
 	"strings"
 	"time"
 
@@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			lightsailLabelRegion: model.LabelValue(d.cfg.Region),
 		}
 
-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
 		labels[model.AddressLabel] = model.LabelValue(addr)
 
 		if inst.PublicIpAddress != nil {
diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go
index 7c2ece2c7b..70d95b9f3a 100644
--- a/discovery/azure/azure.go
+++ b/discovery/azure/azure.go
@@ -20,6 +20,7 @@ import (
 	"math/rand"
 	"net"
 	"net/http"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
 		}
 		if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
 			labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
-			address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+			address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
 			labels[model.AddressLabel] = model.LabelValue(address)
 			return labels, nil
 		}
diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go
index 40eed7697a..bdc1fc8dce 100644
--- a/discovery/consul/consul.go
+++ b/discovery/consul/consul.go
@@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 		// since the service may be registered remotely through a different node.
var addr string if serviceNode.Service.Address != "" { - addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port)) + addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port)) } else { - addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port)) + addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port)) } labels := model.LabelSet{ diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index 18380b7296..ecee60cb1f 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { } labels := model.LabelSet{ - doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)), + doLabelID: model.LabelValue(strconv.Itoa(droplet.ID)), doLabelName: model.LabelValue(droplet.Name), doLabelImage: model.LabelValue(droplet.Image.Slug), doLabelImageName: model.LabelValue(droplet.Image.Name), diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index cf56a2ad02..314c3d38cd 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net" + "strconv" "strings" "sync" "time" @@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ tg := &targetgroup.Group{} hostPort := func(a string, p int) model.LabelValue { - return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p))) + return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p))) } for _, record := range response.Answer { @@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ switch addr := record.(type) { case *dns.SRV: dnsSrvRecordTarget = model.LabelValue(addr.Target) - dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port)) + dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port))) // Remove the final dot from rooted DNS names to make them look more usual. 
addr.Target = strings.TrimRight(addr.Target, ".") diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go index 52e8ce7b48..5a90968f1b 100644 --- a/discovery/eureka/client.go +++ b/discovery/eureka/client.go @@ -97,6 +97,7 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic resp.Body.Close() }() + //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode) } diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index 6d0599dfa2..df56f94c5f 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -15,7 +15,6 @@ package hetzner import ( "context" - "fmt" "net" "net/http" "strconv" @@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er for i, server := range servers { labels := model.LabelSet{ hetznerLabelRole: model.LabelValue(HetznerRoleHcloud), - hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)), + hetznerLabelServerID: model.LabelValue(strconv.FormatInt(server.ID, 10)), hetznerLabelServerName: model.LabelValue(server.Name), hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name), hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()), @@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name), hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone), hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name), - hetznerLabelHcloudCPUCores: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)), + hetznerLabelHcloudCPUCores: model.LabelValue(strconv.Itoa(server.ServerType.Cores)), hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType), - hetznerLabelHcloudMemoryGB: model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))), - hetznerLabelHcloudDiskGB: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)), + hetznerLabelHcloudMemoryGB: model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))), + hetznerLabelHcloudDiskGB: model.LabelValue(strconv.Itoa(server.ServerType.Disk)), model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))), } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index b862c33f5b..64155bfaed 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -87,6 +87,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) resp.Body.Close() }() + //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode) } @@ -112,7 +113,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) hetznerLabelPublicIPv4: model.LabelValue(server.Server.ServerIP), hetznerLabelServerStatus: model.LabelValue(server.Server.Status), hetznerLabelRobotProduct: model.LabelValue(server.Server.Product), - hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)), + hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)), model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))), } diff --git 
a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index 1ed699645d..a455a8e341 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig { var cfg discovery.StaticConfig for i, addr := range addrs { cfg = append(cfg, &targetgroup.Group{ - Source: fmt.Sprint(i), + Source: strconv.Itoa(i), Targets: []model.LabelSet{ {model.AddressLabel: model.LabelValue(addr)}, }, diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 2a5475b854..634a6b1d4b 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -186,12 +186,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { if d.lastResults != nil && d.eventPollingEnabled { // Check to see if there have been any events. If so, refresh our data. - opts := linodego.ListOptions{ + eventsOpts := linodego.ListOptions{ PageOptions: &linodego.PageOptions{Page: 1}, PageSize: 25, Filter: fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")), } - events, err := d.client.ListEvents(ctx, &opts) + events, err := d.client.ListEvents(ctx, &eventsOpts) if err != nil { var e *linodego.Error if errors.As(err, &e) && e.Code == http.StatusUnauthorized { @@ -232,31 +232,40 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro tg := &targetgroup.Group{ Source: "Linode", } - opts := linodego.ListOptions{ + // We need 3 of these because Linodego writes into the structure during pagination + listInstancesOpts := linodego.ListOptions{ + PageSize: 500, + } + listIPAddressesOpts := linodego.ListOptions{ + PageSize: 500, + } + listIPv6RangesOpts := linodego.ListOptions{ PageSize: 500, } // If region filter provided, use it to constrain results. if d.region != "" { - opts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) + listInstancesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) + listIPAddressesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) + listIPv6RangesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region) } // Gather all linode instances. - instances, err := d.client.ListInstances(ctx, &opts) + instances, err := d.client.ListInstances(ctx, &listInstancesOpts) if err != nil { d.metrics.failuresCount.Inc() return nil, err } // Gather detailed IP address info for all IPs on all linode instances. - detailedIPs, err := d.client.ListIPAddresses(ctx, &opts) + detailedIPs, err := d.client.ListIPAddresses(ctx, &listIPAddressesOpts) if err != nil { d.metrics.failuresCount.Inc() return nil, err } // Gather detailed IPv6 Range info for all linode instances. 
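The Linode hunk above splits the single shared `linodego.ListOptions` into three per-call values because, as its comment notes, the client writes pagination state back into the options struct; the hunk continues below with the third call site. A generic sketch of the hazard, using hypothetical stand-in types rather than linodego's real API:

```go
package example

// ListOptions mimics a paginated client's options struct; the fields are
// hypothetical stand-ins, not linodego's actual definition.
type ListOptions struct {
	Page     int
	PageSize int
	Filter   string
}

// listAll advances Page inside the caller's struct, the way the diff's
// comment says linodego behaves during pagination.
func listAll(opts *ListOptions, pages int) {
	for opts.Page = 1; opts.Page <= pages; opts.Page++ {
		// ... fetch page opts.Page ...
	}
	// opts now carries leftover pagination state; reusing the same value
	// for a second List call would not start at page 1, hence one options
	// value per call site in the hunk above.
}
```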
- ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &opts) + ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &listIPv6RangesOpts) if err != nil { d.metrics.failuresCount.Inc() return nil, err @@ -325,7 +334,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro } labels := model.LabelSet{ - linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)), + linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)), linodeLabelName: model.LabelValue(instance.Label), linodeLabelImage: model.LabelValue(instance.Image), linodeLabelPrivateIPv4: model.LabelValue(privateIPv4), @@ -338,13 +347,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro linodeLabelType: model.LabelValue(instance.Type), linodeLabelStatus: model.LabelValue(instance.Status), linodeLabelGroup: model.LabelValue(instance.Group), - linodeLabelGPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)), + linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), - linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)), - linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)), - linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)), - linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)), + linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)), + linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)), + linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)), + linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) diff --git a/discovery/manager.go b/discovery/manager.go index f14071af30..897d7d151c 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -120,6 +120,16 @@ func Name(n string) func(*Manager) { } } +// Updatert sets the updatert of the manager. +// Used to speed up tests. +func Updatert(u time.Duration) func(*Manager) { + return func(m *Manager) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.updatert = u + } +} + // HTTPClientOptions sets the list of HTTP client options to expose to // Discoverers. It is up to Discoverers to choose to use the options provided. 
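`Updatert` above follows the same functional-option pattern as `Name` and `HTTPClientOptions` around it: each option is a `func(*Manager)` applied at construction time. A hedged sketch of how such options compose; the `applyOptions` helper is illustrative scaffolding, since the exact `NewManager` signature is not shown in this diff:

```go
package example

import (
	"time"

	"github.com/prometheus/prometheus/discovery"
)

// applyOptions mimics what a constructor does with trailing options: each
// one locks the manager and mutates a single field.
func applyOptions(m *discovery.Manager, opts ...func(*discovery.Manager)) *discovery.Manager {
	for _, o := range opts {
		o(m)
	}
	return m
}

func example(m *discovery.Manager) {
	// Shorten the update interval so a test doesn't wait for the default tick.
	applyOptions(m, discovery.Name("sd-test"), discovery.Updatert(100*time.Millisecond))
}
```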
func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 656d7c3c66..be07edbdb4 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig { var cfg StaticConfig for i, addr := range addrs { cfg = append(cfg, &targetgroup.Group{ - Source: fmt.Sprint(i), + Source: strconv.Itoa(i), Targets: []model.LabelSet{ {model.AddressLabel: model.LabelValue(addr)}, }, diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 3e9e15967f..38b47accff 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string { host = task.Host } - return net.JoinHostPort(host, fmt.Sprintf("%d", port)) + return net.JoinHostPort(host, strconv.Itoa(int(port))) } // Get a list of ports and a list of labels from a PortMapping. diff --git a/discovery/metrics.go b/discovery/metrics.go index e738331a18..356be1ddcb 100644 --- a/discovery/metrics.go +++ b/discovery/metrics.go @@ -19,16 +19,6 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -var ( - clientGoRequestMetrics = &clientGoRequestMetricAdapter{} - clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{} -) - -func init() { - clientGoRequestMetrics.RegisterWithK8sGoClient() - clientGoWorkloadMetrics.RegisterWithK8sGoClient() -} - // Metrics to be used with a discovery manager. type Metrics struct { FailedConfigs prometheus.Gauge diff --git a/discovery/metrics_k8s_client.go b/discovery/metrics_k8s_client.go index f16245684b..c13ce53317 100644 --- a/discovery/metrics_k8s_client.go +++ b/discovery/metrics_k8s_client.go @@ -35,6 +35,11 @@ const ( workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue" ) +var ( + clientGoRequestMetrics = &clientGoRequestMetricAdapter{} + clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{} +) + var ( // Metrics for client-go's HTTP requests. clientGoRequestResultMetricVec = prometheus.NewCounterVec( @@ -135,6 +140,9 @@ func clientGoMetrics() []prometheus.Collector { } func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error { + clientGoRequestMetrics.RegisterWithK8sGoClient() + clientGoWorkloadMetrics.RegisterWithK8sGoClient() + for _, collector := range clientGoMetrics() { err := registerer.Register(collector) if err != nil { diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 6a2b2d9302..11445092ee 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -22,8 +22,10 @@ import ( "strconv" "time" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" @@ -58,6 +60,7 @@ var DefaultDockerSDConfig = DockerSDConfig{ Filters: []Filter{}, HostNetworkingHost: "localhost", HTTPClientConfig: config.DefaultHTTPClientConfig, + MatchFirstNetwork: true, } func init() { @@ -73,7 +76,8 @@ type DockerSDConfig struct { Filters []Filter `yaml:"filters"` HostNetworkingHost string `yaml:"host_networking_host"` - RefreshInterval model.Duration `yaml:"refresh_interval"` + RefreshInterval model.Duration `yaml:"refresh_interval"` + MatchFirstNetwork bool `yaml:"match_first_network"` } // NewDiscovererMetrics implements discovery.Config. 
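The Docker SD hunks above introduce a `match_first_network` option that defaults to true, preserving the old behavior of emitting a single target for a multi-network container; the refresh logic below uses it to either pick one network or fan out over all of them. A minimal sketch of opting out, mirroring `TestDockerSDRefreshMatchAllNetworks` further down (the host value is a placeholder, and `gopkg.in/yaml.v2` is assumed as the YAML package, as elsewhere in this repo):

```go
package example

import (
	"github.com/prometheus/prometheus/discovery/moby"
	"gopkg.in/yaml.v2"
)

func dockerSDConfig() (*moby.DockerSDConfig, error) {
	var cfg moby.DockerSDConfig
	// Unmarshalling starts from DefaultDockerSDConfig, so MatchFirstNetwork
	// begins as true for backward compatibility.
	if err := yaml.Unmarshal([]byte("host: unix:///var/run/docker.sock\n"), &cfg); err != nil {
		return nil, err
	}
	// Opting out: one target per attached network instead of one per
	// container, as the test below expects (9 targets instead of 6).
	cfg.MatchFirstNetwork = false
	return &cfg, nil
}
```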
@@ -119,6 +123,7 @@ type DockerDiscovery struct {
 	port               int
 	hostNetworkingHost string
 	filters            filters.Args
+	matchFirstNetwork  bool
 }
 
 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
@@ -131,6 +136,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discove
 	d := &DockerDiscovery{
 		port:               conf.Port,
 		hostNetworkingHost: conf.HostNetworkingHost,
+		matchFirstNetwork:  conf.MatchFirstNetwork,
 	}
 
 	hostURL, err := url.Parse(conf.Host)
@@ -202,6 +208,11 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		return nil, fmt.Errorf("error while computing network labels: %w", err)
 	}
 
+	allContainers := make(map[string]types.Container)
+	for _, c := range containers {
+		allContainers[c.ID] = c
+	}
+
 	for _, c := range containers {
 		if len(c.Names) == 0 {
 			continue
@@ -218,7 +229,50 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 			commonLabels[dockerLabelContainerLabelPrefix+ln] = v
 		}
 
-		for _, n := range c.NetworkSettings.Networks {
+		networks := c.NetworkSettings.Networks
+		containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
+		if len(networks) == 0 {
+			// Try to look up shared networks.
+			for {
+				if containerNetworkMode.IsContainer() {
+					tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
+					if !exists {
+						break
+					}
+					networks = tmpContainer.NetworkSettings.Networks
+					containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
+					if len(networks) > 0 {
+						break
+					}
+				} else {
+					break
+				}
+			}
+		}
+
+		if d.matchFirstNetwork && len(networks) > 1 {
+			// Match the user-defined network.
+			if containerNetworkMode.IsUserDefined() {
+				networkMode := string(containerNetworkMode)
+				networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
+			} else {
+				// Get the first network if the container network mode has the "none" value.
+				// This case appears under certain conditions:
+				// 1. The container was created with "--net=none".
+				// 2. The "none" network was disconnected.
+				// 3. The container was reconnected to user-defined networks.
+ var first string + for k, n := range networks { + if n != nil { + first = k + break + } + } + networks = map[string]*network.EndpointSettings{first: networks[first]} + } + } + + for _, n := range networks { var added bool for _, p := range c.Ports { diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index fec56d3e5f..c108ddf582 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -16,6 +16,7 @@ package moby import ( "context" "fmt" + "sort" "testing" "github.com/go-kit/log" @@ -59,7 +60,7 @@ host: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 3) + require.Len(t, tg.Targets, 6) for i, lbls := range []model.LabelSet{ { @@ -113,9 +114,259 @@ host: %s "__meta_docker_container_network_mode": "host", "__meta_docker_network_ip": "", }, + { + "__address__": "172.20.0.2:3306", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.2:33060", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.20.0.2:9104", + "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_name": "/dockersd_mysql_exporter", + "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9104", + }, } { t.Run(fmt.Sprintf("item %d", i), 
func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) } } + +func TestDockerSDRefreshMatchAllNetworks(t *testing.T) { + sdmock := NewSDMock(t, "dockerprom") + sdmock.Setup() + + e := sdmock.Endpoint() + url := e[:len(e)-1] + cfgString := fmt.Sprintf(` +--- +host: %s +`, url) + var cfg DockerSDConfig + require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) + + cfg.MatchFirstNetwork = false + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() + defer refreshMetrics.Unregister() + d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) + require.NoError(t, err) + + ctx := context.Background() + tgs, err := d.refresh(ctx) + require.NoError(t, err) + + require.Len(t, tgs, 1) + + tg := tgs[0] + require.NotNil(t, tg) + require.NotNil(t, tg.Targets) + require.Len(t, tg.Targets, 9) + + sortFunc := func(labelSets []model.LabelSet) { + sort.Slice(labelSets, func(i, j int) bool { + return labelSets[i]["__address__"] < labelSets[j]["__address__"] + }) + } + expected := []model.LabelSet{ + { + "__address__": "172.19.0.2:9100", + "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "node", + "__meta_docker_container_label_com_docker_compose_version": "1.25.0", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_label_prometheus_job": "node", + "__meta_docker_container_name": "/dockersd_node_1", + "__meta_docker_container_network_mode": "dockersd_default", + "__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.19.0.2", + "__meta_docker_network_label_com_docker_compose_network": "default", + "__meta_docker_network_label_com_docker_compose_project": "dockersd", + "__meta_docker_network_label_com_docker_compose_version": "1.25.0", + "__meta_docker_network_name": "dockersd_default", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9100", + }, + { + "__address__": "172.19.0.3:80", + "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "noport", + "__meta_docker_container_label_com_docker_compose_version": "1.25.0", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_label_prometheus_job": "noport", + "__meta_docker_container_name": "/dockersd_noport_1", + "__meta_docker_container_network_mode": "dockersd_default", + "__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.19.0.3", + "__meta_docker_network_label_com_docker_compose_network": "default", + "__meta_docker_network_label_com_docker_compose_project": "dockersd", + "__meta_docker_network_label_com_docker_compose_version": "1.25.0", + "__meta_docker_network_name": "dockersd_default", + "__meta_docker_network_scope": "local", + }, + { + "__address__": "localhost", + 
"__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "host_networking", + "__meta_docker_container_label_com_docker_compose_version": "1.25.0", + "__meta_docker_container_name": "/dockersd_host_networking_1", + "__meta_docker_container_network_mode": "host", + "__meta_docker_network_ip": "", + }, + { + "__address__": "172.20.0.2:3306", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.2:33060", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.2:3306", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.2", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.21.0.2:33060", + "__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_mysql", + "__meta_docker_container_network_mode": "dockersd_private", + "__meta_docker_network_id": 
"bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.2", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.2:9104", + "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_name": "/dockersd_mysql_exporter", + "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.2", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9104", + }, + { + "__address__": "172.20.0.2:9104", + "__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysqlexporter", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_label_maintainer": "The Prometheus Authors ", + "__meta_docker_container_name": "/dockersd_mysql_exporter", + "__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.2", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "9104", + }, + } + + sortFunc(expected) + sortFunc(tg.Targets) + + for i, lbls := range expected { + t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { + require.Equal(t, lbls, tg.Targets[i]) + }) + } +} diff --git a/discovery/moby/network.go b/discovery/moby/network.go index 0e0d0041de..794d2e607d 100644 --- a/discovery/moby/network.go +++ b/discovery/moby/network.go @@ -15,7 +15,7 @@ package moby import ( "context" - "fmt" + "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/client" @@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s labelPrefix + labelNetworkID: network.ID, labelPrefix + labelNetworkName: network.Name, labelPrefix + labelNetworkScope: network.Scope, - labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal), - labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress), + labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal), + labelPrefix + labelNetworkIngress: strconv.FormatBool(network.Ingress), } for k, v := range network.Labels { ln := strutil.SanitizeLabelName(k) diff --git a/discovery/moby/nodes.go b/discovery/moby/nodes.go index a7c5551c02..b5be844eda 100644 --- 
a/discovery/moby/nodes.go +++ b/discovery/moby/nodes.go @@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err swarmLabelNodeAddress: model.LabelValue(n.Status.Addr), } if n.ManagerStatus != nil { - labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader)) + labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader)) labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability) labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr) } diff --git a/discovery/moby/services.go b/discovery/moby/services.go index 1d472b5c00..c61b499259 100644 --- a/discovery/moby/services.go +++ b/discovery/moby/services.go @@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, labels[model.LabelName(k)] = model.LabelValue(v) } - addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port)) + addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) diff --git a/discovery/moby/tasks.go b/discovery/moby/tasks.go index 2505a7b07a..38b9d33de2 100644 --- a/discovery/moby/tasks.go +++ b/discovery/moby/tasks.go @@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err labels[model.LabelName(k)] = model.LabelValue(v) } - addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port)) + addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) diff --git a/discovery/moby/testdata/dockerprom/containers/json.json b/discovery/moby/testdata/dockerprom/containers/json.json index 37f575d22c..ebfc56b6d5 100644 --- a/discovery/moby/testdata/dockerprom/containers/json.json +++ b/discovery/moby/testdata/dockerprom/containers/json.json @@ -128,5 +128,105 @@ } }, "Mounts": [] + }, + { + "Id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8", + "Names": [ + "/dockersd_mysql" + ], + "Image": "mysql:5.7.29", + "ImageID": "sha256:5d9483f9a7b21c87e0f5b9776c3e06567603c28c0062013eda127c968175f5e8", + "Command": "mysqld", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 3306, + "Type": "tcp" + }, + { + "PrivatePort": 33060, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysql", + "com.docker.compose.version": "2.2.2" + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "dockersd_private" + }, + "NetworkSettings": { + "Networks": { + "dockersd_private": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "EndpointID": "80f8a61b37701a9991bb98c75ddd23fd9b7c16b5575ca81343f6b44ff4a2a9d9", + "Gateway": "172.20.0.1", + "IPAddress": "172.20.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:14:00:0a", + "DriverOpts": null + }, + "dockersd_private1": { + "IPAMConfig": {}, + "Links": null, + "Aliases": [ + "mysql", + "mysql", + "f9ade4b83199" + ], + "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "EndpointID": "f80921d10e78c99a5907705aae75befea40c3d3e9f820e66ab392f7274be16b8", + "Gateway": "172.21.0.1", + "IPAddress": "172.21.0.2", + "IPPrefixLen": 24, + "IPv6Gateway": 
"", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:15:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] + }, + { + "Id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06", + "Names": [ + "/dockersd_mysql_exporter" + ], + "Image": "prom/mysqld-exporter:latest", + "ImageID": "sha256:121b8a7cd0525dd89aaec58ad7d34c3bb3714740e5a67daf6510ccf71ab219a9", + "Command": "/bin/mysqld_exporter", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 9104, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysqlexporter", + "com.docker.compose.version": "2.2.2", + "maintainer": "The Prometheus Authors " + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8" + }, + "NetworkSettings": { + "Networks": {} + }, + "Mounts": [] } ] diff --git a/discovery/moby/testdata/dockerprom/networks.json b/discovery/moby/testdata/dockerprom/networks.json index 35facd3bb9..75d4442df8 100644 --- a/discovery/moby/testdata/dockerprom/networks.json +++ b/discovery/moby/testdata/dockerprom/networks.json @@ -111,5 +111,59 @@ "Containers": {}, "Options": {}, "Labels": {} + }, + { + "Name": "dockersd_private", + "Id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "Created": "2022-03-25T09:21:17.718370976+08:00", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "172.20.0.1/16" + } + ] + }, + "Internal": false, + "Attachable": false, + "Ingress": false, + "ConfigFrom": { + "Network": "" + }, + "ConfigOnly": false, + "Containers": {}, + "Options": {}, + "Labels": {} + }, + { + "Name": "dockersd_private1", + "Id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "Created": "2022-03-25T09:21:17.718370976+08:00", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "172.21.0.1/16" + } + ] + }, + "Internal": false, + "Attachable": false, + "Ingress": false, + "ConfigFrom": { + "Network": "" + }, + "ConfigOnly": false, + "Containers": {}, + "Options": {}, + "Labels": {} } ] diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 16964cfb62..8964da9294 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "net" + "strconv" "github.com/go-kit/log" "github.com/gophercloud/gophercloud" @@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group } tg := &targetgroup.Group{ - Source: fmt.Sprintf("OS_" + h.region), + Source: "OS_" + h.region, } // OpenStack API reference // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details @@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group } for _, hypervisor := range hypervisorList { labels := model.LabelSet{} - addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port)) + addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port)) labels[model.AddressLabel] = model.LabelValue(addr) labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID) labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname) diff --git a/discovery/openstack/instance.go 
b/discovery/openstack/instance.go index 9b28c1d6e1..78c669e6f7 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "net" + "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, } pager := servers.List(client, opts) tg := &targetgroup.Group{ - Source: fmt.Sprintf("OS_" + i.region), + Source: "OS_" + i.region, } err = pager.EachPage(func(page pagination.Page) (bool, error) { if ctx.Err() != nil { @@ -145,12 +146,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - flavorID, ok := s.Flavor["id"].(string) - if !ok { - level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") - continue + flavorName, nameOk := s.Flavor["original_name"].(string) + // "original_name" is only available for microversion >= 2.47. It was added in favor of "id". + if !nameOk { + flavorID, idOk := s.Flavor["id"].(string) + if !idOk { + level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string") + continue + } + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) + } else { + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName) } - labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) imageID, ok := s.Image["id"].(string) if ok { @@ -194,7 +201,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok { lbls[openstackLabelPublicIP] = model.LabelValue(val) } - addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port)) + addr = net.JoinHostPort(addr, strconv.Itoa(i.port)) lbls[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, lbls) diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 9e124b6053..2b5ac1b89e 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -84,7 +84,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.31:0"), - "__meta_openstack_instance_flavor": model.LabelValue("1"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.medium"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"), "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), @@ -96,7 +96,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.33:0"), - "__meta_openstack_instance_flavor": model.LabelValue("4"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("merp"), @@ -108,7 +108,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { }, { "__address__": model.LabelValue("10.0.0.34:0"), - "__meta_openstack_instance_flavor": model.LabelValue("4"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": 
model.LabelValue("merp"), diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index b1267db90e..4518f41166 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -427,13 +427,17 @@ const serverListBody = ` "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { - "id": "1", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.medium", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", "security_groups": [ @@ -498,13 +502,17 @@ const serverListBody = ` "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", "flavor": { - "id": "4", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.small", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } }, "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", "security_groups": [ diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index bb5dadcd7b..a70857a08b 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou model.InstanceLabel: model.LabelValue(server.Name), dedicatedServerLabelPrefix + "state": model.LabelValue(server.State), dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange), - dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)), + dedicatedServerLabelPrefix + "link_speed": model.LabelValue(strconv.Itoa(server.LinkSpeed)), dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack), dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)), dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os), dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel), - dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)), + dedicatedServerLabelPrefix + "server_id": model.LabelValue(strconv.FormatInt(server.ServerID, 10)), dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse), dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter), dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name), diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index 53ec9b4459..9c95bf90e6 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -66,7 +66,7 @@ endpoint: %s _, err := createClient(&conf) - require.ErrorContains(t, err, "missing application key") + require.ErrorContains(t, err, "missing authentication information") } func TestParseIPs(t *testing.T) { diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index e2d1dee364..58ceeabd87 100644 --- a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ 
-19,6 +19,7 @@ import ( "net/netip" "net/url" "path" + "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { model.InstanceLabel: model.LabelValue(server.Name), vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer), vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)), - vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)), - vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)), + vpsLabelPrefix + "model_vcore": model.LabelValue(strconv.Itoa(server.Model.Vcore)), + vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)), vpsLabelPrefix + "version": model.LabelValue(server.Model.Version), vpsLabelPrefix + "model_name": model.LabelValue(server.Model.Name), - vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)), - vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)), + vpsLabelPrefix + "disk": model.LabelValue(strconv.Itoa(server.Model.Disk)), + vpsLabelPrefix + "memory": model.LabelValue(strconv.Itoa(server.Model.Memory)), vpsLabelPrefix + "zone": model.LabelValue(server.Zone), vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName), vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster), vpsLabelPrefix + "state": model.LabelValue(server.State), vpsLabelPrefix + "name": model.LabelValue(server.Name), vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode), - vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)), + vpsLabelPrefix + "memory_limit": model.LabelValue(strconv.Itoa(server.MemoryLimit)), vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType), - vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)), + vpsLabelPrefix + "vcore": model.LabelValue(strconv.Itoa(server.Vcore)), vpsLabelPrefix + "ipv4": model.LabelValue(ipv4), vpsLabelPrefix + "ipv6": model.LabelValue(ipv6), } diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index 8c9ccde0a4..8f89acbf93 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { pdbLabelResource: model.LabelValue(resource.Resource), pdbLabelType: model.LabelValue(resource.Type), pdbLabelTitle: model.LabelValue(resource.Title), - pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", resource.Exported)), + pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)), pdbLabelFile: model.LabelValue(resource.File), pdbLabelEnvironment: model.LabelValue(resource.Environment), } diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index 9dd786c804..2542c63253 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -174,20 +174,25 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, labels[instanceTagsLabel] = model.LabelValue(tags) } - if server.IPv6 != nil { - labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) + addr := "" + if server.IPv6 != nil { //nolint:staticcheck + labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) //nolint:staticcheck + addr = server.IPv6.Address.String() //nolint:staticcheck } - if 
server.PublicIP != nil { - labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) + if server.PublicIP != nil { //nolint:staticcheck + labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck + addr = server.PublicIP.Address.String() //nolint:staticcheck } if server.PrivateIP != nil { labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP) + addr = *server.PrivateIP + } - addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10)) + if addr != "" { + addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) - targets = append(targets, labels) } } diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go index d2449d00c9..ae70a9ed25 100644 --- a/discovery/scaleway/instance_test.go +++ b/discovery/scaleway/instance_test.go @@ -60,7 +60,7 @@ api_url: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 2) + require.Len(t, tg.Targets, 3) for i, lbls := range []model.LabelSet{ { @@ -110,6 +110,28 @@ api_url: %s "__meta_scaleway_instance_type": "DEV1-S", "__meta_scaleway_instance_zone": "fr-par-1", }, + { + "__address__": "51.158.183.115:80", + "__meta_scaleway_instance_boot_type": "local", + "__meta_scaleway_instance_hostname": "routed-dualstack", + "__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a", + "__meta_scaleway_instance_image_arch": "x86_64", + "__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160", + "__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish", + "__meta_scaleway_instance_location_cluster_id": "19", + "__meta_scaleway_instance_location_hypervisor_id": "1201", + "__meta_scaleway_instance_location_node_id": "24", + "__meta_scaleway_instance_name": "routed-dualstack", + "__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f", + "__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f", + "__meta_scaleway_instance_public_ipv4": "51.158.183.115", + "__meta_scaleway_instance_region": "nl-ams", + "__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092", + "__meta_scaleway_instance_security_group_name": "Default security group", + "__meta_scaleway_instance_status": "running", + "__meta_scaleway_instance_type": "DEV1-S", + "__meta_scaleway_instance_zone": "nl-ams-1", + }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) diff --git a/discovery/scaleway/testdata/instance.json b/discovery/scaleway/testdata/instance.json index f8d35b215c..b433f7598e 100644 --- a/discovery/scaleway/testdata/instance.json +++ b/discovery/scaleway/testdata/instance.json @@ -216,6 +216,146 @@ "placement_group": null, "private_nics": [], "zone": "fr-par-1" + }, + { + "id": "4904366a-7e26-4b65-b97b-6392c761247a", + "name": "routed-dualstack", + "arch": "x86_64", + "commercial_type": "DEV1-S", + "boot_type": "local", + "organization": "20b3d507-96ac-454c-a795-bc731b46b12f", + "project": "20b3d507-96ac-454c-a795-bc731b46b12f", + "hostname": "routed-dualstack", + "image": { + "id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "project": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "root_volume": { + "id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a", + "name": "Ubuntu 22.04 Jammy Jellyfish", + 
"volume_type": "unified", + "size": 10000000000 + }, + "extra_volumes": {}, + "public": true, + "arch": "x86_64", + "creation_date": "2024-02-22T15:52:56.037007+00:00", + "modification_date": "2024-02-22T15:52:56.037007+00:00", + "default_bootscript": null, + "from_server": null, + "state": "available", + "tags": [], + "zone": "nl-ams-1" + }, + "volumes": { + "0": { + "boot": false, + "id": "fe85c817-e67e-4e24-8f13-bde3e9f42620", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "volume_type": "l_ssd", + "export_uri": null, + "organization": "20b3d507-96ac-454c-a795-bc731b46b12f", + "project": "20b3d507-96ac-454c-a795-bc731b46b12f", + "server": { + "id": "4904366a-7e26-4b65-b97b-6392c761247a", + "name": "routed-dualstack" + }, + "size": 20000000000, + "state": "available", + "creation_date": "2024-04-19T14:50:14.019739+00:00", + "modification_date": "2024-04-19T14:50:14.019739+00:00", + "tags": [], + "zone": "nl-ams-1" + } + }, + "tags": [], + "state": "running", + "protected": false, + "state_detail": "booted", + "public_ip": { + "id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0", + "address": "51.158.183.115", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [], + "state": "attached", + "ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e" + }, + "public_ips": [ + { + "id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0", + "address": "51.158.183.115", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [], + "state": "attached", + "ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e" + }, + { + "id": "f52a8c81-0875-4aee-b96e-eccfc6bec367", + "address": "2001:bc8:1640:1568:dc00:ff:fe21:91b", + "dynamic": false, + "gateway": "fe80::dc00:ff:fe21:91c", + "netmask": "64", + "family": "inet6", + "provisioning_mode": "slaac", + "tags": [], + "state": "attached", + "ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6" + } + ], + "mac_address": "de:00:00:21:09:1b", + "routed_ip_enabled": true, + "ipv6": null, + "extra_networks": [], + "dynamic_ip_required": false, + "enable_ipv6": false, + "private_ip": null, + "creation_date": "2024-04-19T14:50:14.019739+00:00", + "modification_date": "2024-04-19T14:52:21.181670+00:00", + "bootscript": { + "id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe", + "public": true, + "title": "x86_64 mainline 4.4.182 rev1", + "architecture": "x86_64", + "organization": "11111111-1111-4111-8111-111111111111", + "project": "11111111-1111-4111-8111-111111111111", + "kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182", + "dtb": "", + "initrd": "http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "zone": "nl-ams-1" + }, + "security_group": { + "id": "984414da-9fc2-49c0-a925-fed6266fe092", + "name": "Default security group" + }, + "location": { + "zone_id": "ams1", + "platform_id": "23", + "cluster_id": "19", + "hypervisor_id": "1201", + "node_id": "24" + }, + "maintenances": [], + "allowed_actions": [ + "poweroff", + "terminate", + "reboot", + "stop_in_place", + "backup" + ], + "placement_group": null, + "private_nics": [], + "zone": "nl-ams-1" } ] -} +} \ No newline at end of file diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index e885ef2e80..c8af2f1587 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -20,6 +20,7 @@ import ( "net/http" "net/url" "path" + "strconv" "strings" "time" @@ 
-269,7 +270,7 @@ func (d *Discovery) getEndpointLabels( model.AddressLabel: model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), - uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)), + uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName), uyuniLablelExporter: model.LabelValue(endpoint.ExporterName), diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 303c7ca6d0..92904dd71c 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) { labels := model.LabelSet{} labels[serversetPathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port))) + net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port))) labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host) - labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port)) + labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port)) for name, endpoint := range member.AdditionalEndpoints { cleanName := model.LabelName(strutil.SanitizeLabelName(name)) labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue( endpoint.Host) labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue( - fmt.Sprintf("%d", endpoint.Port)) + strconv.Itoa(endpoint.Port)) } labels[serversetStatusLabel] = model.LabelValue(member.Status) @@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) { labels := model.LabelSet{} labels[nervePathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port))) + net.JoinHostPort(member.Host, strconv.Itoa(member.Port))) labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host) - labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port)) + labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port)) labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name) return labels, nil diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index b1b9d29858..2bc5b15ef2 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -26,6 +26,7 @@ The Prometheus monitoring server | --web.enable-lifecycle | Enable shutdown and reload via HTTP request. | `false` | | --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` | | --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` | +| --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` | | --web.console.templates | Path to the console template directory, available at /consoles. 
| `consoles` | | --web.console.libraries | Path to the console library directory. | `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | @@ -48,8 +49,9 @@ The Prometheus monitoring server | --rules.alert.for-outage-tolerance | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` | | --rules.alert.for-grace-period | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` | | --rules.alert.resend-delay | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` | -| --rules.max-concurrent-evals | Global concurrency limit for independent rules that can run concurrently. Use with server mode only. | `4` | +| --rules.max-concurrent-evals | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` | | --alertmanager.notification-queue-capacity | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` | +| --alertmanager.drain-notification-queue-on-shutdown | Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down. Use with server mode only. | `true` | | --query.lookback-delta | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` | | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 3eceed48f2..443cd3f0cb 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -566,6 +566,7 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | +| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | | --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -584,7 +585,7 @@ Dump samples from a TSDB. ##### `promtool tsdb dump-openmetrics` -[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped. +[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics. @@ -592,6 +593,7 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | +| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | | --match | Series selector. Can be specified multiple times. 
| `{__name__=~'(?s:.*)'}` | diff --git a/docs/configuration/configuration.md index 2f2e07a0c2..35976871b9 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -70,6 +70,10 @@ global: # How frequently to evaluate rules. [ evaluation_interval: | default = 1m ] + + # Offset the rule evaluation timestamp of rule groups by the specified duration into the past to ensure the underlying metrics have been received. This acts as the global default for a rule group's query_offset. + # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there are anomalies with scraping. + [ rule_query_offset: | default = 0s ] # The labels to add to any time series or alerts when communicating with # external systems (federation, remote storage, Alertmanager). @@ -117,6 +121,12 @@ global: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] +runtime: + # Configure the Go garbage collector GOGC parameter. + # See: https://tip.golang.org/doc/gc-guide#GOGC + # Lowering this number increases CPU usage. + [ gogc: | default = 75 ] + # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. rule_files: @@ -931,6 +941,9 @@ tls_config: # The host to use if the container is in host networking mode. [ host_networking_host: | default = "localhost" ] +# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets. +[ match_first_network: | default = true ] + # Optional filters to limit the discovery process to a subset of available # resources. # The available filters are listed in the upstream documentation: @@ -1219,6 +1232,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present * `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance * `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise +* `__meta_ec2_primary_ipv6_addresses`: comma separated list of the Primary IPv6 addresses of the instance, if present. The list is ordered based on the position of each corresponding network interface in the attachment order. * `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available * `__meta_ec2_private_dns_name`: the private DNS name of the instance, if available * `__meta_ec2_private_ip`: the private IP address of the instance, if present @@ -1349,7 +1363,7 @@ interface. The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_openstack_address_pool`: the pool of the private IP. -* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. +* `__meta_openstack_instance_flavor`: the flavor name of the OpenStack instance, or the flavor ID if the flavor name isn't available. * `__meta_openstack_instance_id`: the OpenStack instance ID. * `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using. * `__meta_openstack_instance_name`: the OpenStack instance name. @@ -1357,7 +1371,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_private_ip`: the private IP of the OpenStack instance. * `__meta_openstack_project_id`: the project (tenant) owning this instance. 
* `__meta_openstack_public_ip`: the public IP of the OpenStack instance. -* `__meta_openstack_tag_`: each tag value of the instance. +* `__meta_openstack_tag_`: each metadata item of the instance, with any unsupported characters converted to an underscore. * `__meta_openstack_user_id`: the user account owning the tenant. See below for the configuration options for OpenStack discovery: @@ -1467,6 +1481,7 @@ For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud * `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server * `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server * `__meta_ovhcloud_dedicated_server_name`: the name of the server +* `__meta_ovhcloud_dedicated_server_no_intervention`: whether datacenter intervention is disabled for the server * `__meta_ovhcloud_dedicated_server_os`: the operating system of the server * `__meta_ovhcloud_dedicated_server_rack`: the rack of the server * `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server @@ -1597,7 +1612,16 @@ and serves as an interface to plug in custom service discovery mechanisms. It reads a set of files containing a list of zero or more ``s. Changes to all defined files are detected via disk watches -and applied immediately. Files may be provided in YAML or JSON format. Only +and applied immediately. + +While those individual files are watched for changes, +the parent directory is also watched implicitly. This is to handle [atomic +renaming](https://github.com/fsnotify/fsnotify/blob/c1467c02fba575afdb5f4201072ab8403bbf00f4/README.md?plain=1#L128) efficiently and to detect new files that match the configured globs. +This may cause issues if the parent directory contains a large number of other files, +as each of these files will be watched too, even though the events related +to them are not relevant. + +Files may be provided in YAML or JSON format. Only changes resulting in well-formed target groups are applied. Files must contain a list of static configs, using these formats: @@ -2952,9 +2976,10 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_scaleway_instance_type`: commercial type of the server * `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction)) -This role uses the private IPv4 address by default. This can be +This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be changed with relabeling, as demonstrated in [the Prometheus scaleway-sd configuration file](/documentation/examples/prometheus-scaleway.yml). +Should an instance have no address before relabeling, it will not be added to the target list and you will not be able to relabel it. #### Baremetal role @@ -3550,6 +3575,17 @@ this functionality. # The URL of the endpoint to send samples to. url: +# protobuf message to use when writing to the remote write endpoint. +# +# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which +# will be deprecated eventually. +# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former, +# by improving efficiency and sending metadata, created timestamp and native histograms by default. +# +# Before changing this value, consult with your remote storage provider (or test) what message it supports. 
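+# +# For example (an illustrative sketch only; the URL is a placeholder), opting into the 2.0 message looks like: +# +#   remote_write: +#     - url: "https://remote-storage.example/api/v1/write" +#       protobuf_message: "io.prometheus.write.v2.Request"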
+# Read more on https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request +[ protobuf_message: | default = prometheus.WriteRequest ] + # Timeout for requests to the remote write endpoint. [ remote_timeout: | default = 30s ] @@ -3571,6 +3607,7 @@ write_relabel_configs: [ send_exemplars: | default = false ] # Enables sending of native histograms, also known as sparse histograms, over remote write. +# For the `io.prometheus.write.v2.Request` message, this option is a no-op (always true). [ send_native_histograms: | default = false ] # Sets the `Authorization` header on every remote write request with the @@ -3584,7 +3621,7 @@ basic_auth: # Optional `Authorization` header configuration. authorization: # Sets the authentication type. - [ type: | default: Bearer ] + [ type: | default = Bearer ] # Sets the credentials. It is mutually exclusive with # `credentials_file`. [ credentials: ] @@ -3648,7 +3685,7 @@ tls_config: # contain port numbers. [ no_proxy: ] # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) -[ proxy_from_environment: | default: false ] +[ proxy_from_environment: | default = false ] # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] @@ -3657,7 +3694,7 @@ tls_config: [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default = true ] # Configures the queue used to write to remote storage. queue_config: @@ -3672,7 +3709,8 @@ queue_config: [ min_shards: | default = 1 ] # Maximum number of samples per send. [ max_samples_per_send: | default = 2000] - # Maximum time a sample will wait in buffer. + # Maximum time a sample will wait for a send. The sample might wait less + # if the buffer is full. Further time might pass due to potential retries. [ batch_send_deadline: | default = 5s ] # Initial retry delay. Gets doubled for every retry. [ min_backoff: | default = 30ms ] @@ -3686,7 +3724,10 @@ queue_config: # which means that all samples are sent. [ sample_age_limit: | default = 0s ] -# Configures the sending of series metadata to remote storage. +# Configures the sending of series metadata to remote storage +# if the `prometheus.WriteRequest` message was chosen. When +# `io.prometheus.write.v2.Request` is used, metadata is always sent. +# # Metadata configuration is subject to change at any point # or be removed in future releases. metadata_config: @@ -3800,6 +3841,10 @@ NOTE: Out-of-order ingestion is an experimental feature, but you do not need any # into the TSDB, i.e. it is an in-order sample or an out-of-order/out-of-bounds sample # that is within the out-of-order window, or (b) too-old, i.e. not in-order # and before the out-of-order window. +# +# When out_of_order_time_window is greater than 0, it also affects the experimental agent. It allows +# the agent's WAL to accept out-of-order samples that fall within the specified time window relative +# to the timestamp of the last appended sample for the same series. [ out_of_order_time_window: | default = 0s ] ``` diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md index 48ab951f94..9aa226bbc0 100644 --- a/docs/configuration/recording_rules.md +++ b/docs/configuration/recording_rules.md @@ -86,6 +86,9 @@ name: # rule can produce. 0 is no limit. [ limit: | default = 0 ] +# Offset the rule evaluation timestamp of this particular group by the specified duration into the past. 
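+# For example, `query_offset: 1m` (an illustrative value) evaluates the group's rules against data as of one minute ago, trading result freshness for a better chance that remotely written samples have arrived.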
+[ query_offset: | default = global.rule_query_offset ] + rules: [ - ... ] ``` @@ -148,6 +151,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be recorded as an error in the evaluation, and as such no stale markers are written. +# Rule query offset +Setting a query offset is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals. + # Failed rule evaluations due to slow evaluation If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group. diff --git a/docs/feature_flags.md b/docs/feature_flags.md index b6b201e5b6..2c0ebd8c20 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -230,3 +230,13 @@ The number of concurrent rule evaluations can be configured with `--rules.max-co Enables the new experimental web UI instead of the old and stable web UI. The new UI is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. It is not feature complete yet and is still under active development. `--enable-feature=new-ui` + +## Metadata WAL Records + +`--enable-feature=metadata-wal-records` + +When enabled, Prometheus will store metadata in-memory and keep track of +metadata changes as WAL records on a per-series basis. + +This must be used if +you are also using remote write 2.0, as it will only gather metadata from the WAL. diff --git a/docs/installation.md b/docs/installation.md index 28f64c0f95..c8e359e780 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -31,11 +31,19 @@ production deployments it is highly recommended to use a [named volume](https://docs.docker.com/storage/volumes/) to ease managing the data on Prometheus upgrades. -To provide your own configuration, there are several options. Here are -two examples. +### Setting command line parameters + +The Docker image is started with a number of default command line parameters, which +can be found in the [Dockerfile](https://github.com/prometheus/prometheus/blob/main/Dockerfile) (adjust the link to correspond with the version in use). + +If you want to add extra command line parameters to the `docker run` command, +you will need to re-add the default parameters yourself, as they will be overwritten. ### Volumes & bind-mount +To provide your own configuration, there are several options. Here are +two examples. + Bind-mount your `prometheus.yml` from the host by running: ```bash diff --git a/docs/querying/api.md b/docs/querying/api.md index 46e79181ed..f71bfc1586 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -25,8 +25,10 @@ Other non-`2xx` codes may be returned for errors occurring before the API endpoint is reached. An array of warnings may be returned if there are errors that do 
An additional array of info-level +annotations may be returned for potential query issues that may or may +not be false positives. All of the data that was successfully collected +will be returned in the data field. The JSON response envelope format is as follows: @@ -40,9 +42,11 @@ The JSON response envelope format is as follows: "errorType": "", "error": "", - // Only if there were warnings while executing the request. + // Only set if there were warnings while executing the request. // There will still be data in the data field. - "warnings": [""] + "warnings": [""], + // Only set if there were info-level annnotations while executing the request. + "infos": [""] } ``` @@ -452,7 +456,7 @@ raw numbers. The keys `"histogram"` and `"histograms"` only show up if the experimental native histograms are present in the response. Their placeholder `` -is explained in detail in its own section below. +is explained in detail in its own section below. ### Range vectors @@ -470,9 +474,12 @@ Range vectors are returned as result type `matrix`. The corresponding ] ``` -Each series could have the `"values"` key, or the `"histograms"` key, or both. +Each series could have the `"values"` key, or the `"histograms"` key, or both. For a given timestamp, there will only be one sample of either float or histogram type. +Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort) +and [`sort_by_label`](functions.md#sort_by_label) have no effect for range vectors. + ### Instant vectors Instant vectors are returned as result type `vector`. The corresponding @@ -491,6 +498,10 @@ Instant vectors are returned as result type `vector`. The corresponding Each series could have the `"value"` key, or the `"histogram"` key, but not both. +Series are not guaranteed to be returned in any particular order unless a function +such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)` +is used. + ### Scalars Scalar results are returned as result type `scalar`. The corresponding @@ -682,7 +693,8 @@ URL query parameters: - `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. - `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done. - `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. -- `exclude_alerts=`: only return rules, do not return active alerts. +- `exclude_alerts=`: only return rules, do not return active alerts. +- `match[]=`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional. ```json $ curl http://localhost:9090/api/v1/rules @@ -1302,7 +1314,7 @@ endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview). ## OTLP Receiver -Prometheus can be configured as a receiver for the OTLP Metrics protocol. 
This +Prometheus can be configured as a receiver for the OTLP Metrics protocol. This is not considered an efficient way of ingesting samples. Use it with caution for specific low-volume use cases. It is not suitable for replacing the ingestion via scraping. diff --git a/docs/querying/examples.md b/docs/querying/examples.md index 957c2f5f98..8287ff6f62 100644 --- a/docs/querying/examples.md +++ b/docs/querying/examples.md @@ -95,3 +95,13 @@ Assuming this metric contains one time series per running instance, you could count the number of running instances per application like this: count by (app) (instance_cpu_time_ns) + +If we are exploring some metrics for their labels, e.g. to be able to aggregate over some of them, we could use the following: + + limitk(10, app_foo_metric_bar) + +Alternatively, if we wanted the returned timeseries to be more evenly sampled, +we could use the following to get approximately 10% of them: + + limit_ratio(0.1, app_foo_metric_bar) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index c9e65fe6cc..9a552f697a 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -79,7 +79,12 @@ labels of the 1-element output vector from the input vector. ## `ceil()` `ceil(v instant-vector)` rounds the sample values of all elements in `v` up to -the nearest integer. +the nearest integer value greater than or equal to `v`. + +* `ceil(+Inf) = +Inf` +* `ceil(±0) = ±0` +* `ceil(1.49) = 2.0` +* `ceil(1.78) = 2.0` ## `changes()` @@ -173,7 +178,12 @@ Special cases are: ## `floor()` `floor(v instant-vector)` rounds the sample values of all elements in `v` down -to the nearest integer. +to the nearest integer value smaller than or equal to `v`. + +* `floor(+Inf) = +Inf` +* `floor(±0) = ±0` +* `floor(1.49) = 1.0` +* `floor(1.78) = 1.0` ## `histogram_avg()` @@ -596,10 +606,14 @@ have exactly one element, `scalar` will return `NaN`. `sort(v instant-vector)` returns vector elements sorted by their sample values, in ascending order. Native histograms are sorted by their sum of observations. +Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering. + ## `sort_desc()` Same as `sort`, but sorts in descending order. +Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering. + ## `sort_by_label()` **This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** diff --git a/docs/querying/operators.md b/docs/querying/operators.md index b92bdd94ac..f5f217ff63 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -230,6 +230,8 @@ vector of fewer elements with aggregated values: * `bottomk` (smallest k elements by sample value) * `topk` (largest k elements by sample value) * `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions) +* `limitk` (sample n elements) +* `limit_ratio` (sample elements with approximately 𝑟 ratio if `𝑟 > 0`, and the complement of such samples if `𝑟 = -(1.0 - 𝑟)`) These operators can either be used to aggregate over **all** label dimensions or preserve distinct dimensions by including a `without` or `by` clause. These @@ -249,8 +251,8 @@ all other labels are preserved in the output. `by` does the opposite and drops labels that are not listed in the `by` clause, even if their label values are identical between all elements of the vector. 
-`parameter` is only required for `count_values`, `quantile`, `topk` and -`bottomk`. +`parameter` is only required for `count_values`, `quantile`, `topk`, +`bottomk`, `limitk` and `limit_ratio`. `count_values` outputs one time series per unique sample value. Each series has an additional label. The name of that label is given by the aggregation @@ -261,11 +263,16 @@ time series is the number of times that sample value was present. the input samples, including the original labels, are returned in the result vector. `by` and `without` are only used to bucket the input vector. +`limitk` and `limit_ratio` also return a subset of the input samples, +including the original labels in the result vector. These are experimental +operators that must be enabled with `--enable-feature=promql-experimental-functions`. + `quantile` calculates the φ-quantile, the value that ranks at number φ*N among the N metric values of the dimensions aggregated over. φ is provided as the aggregation parameter. For example, `quantile(0.5, ...)` calculates the median, `quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. + Example: If the metric `http_requests_total` had time series that fan out by @@ -291,6 +298,33 @@ To get the 5 largest HTTP requests counts across all instances we could write: topk(5, http_requests_total) +To sample 10 timeseries, for example to inspect labels and their values, we +could write: + + limitk(10, http_requests_total) + +To deterministically sample approximately 10% of timeseries we could write: + + limit_ratio(0.1, http_requests_total) + +Given that `limit_ratio()` implements a deterministic sampling algorithm (based +on labels' hash), you can get the _complement_ of the above samples, i.e. +approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)` +with: + + limit_ratio(-0.9, http_requests_total) + +You can also use this feature, e.g. to verify that `avg()` is a representative +aggregation for your samples' values, by checking that the difference between +averaging two subsets of the samples is "small" when compared to the standard +deviation. + + abs( + avg(limit_ratio(0.5, http_requests_total)) + - + avg(limit_ratio(-0.5, http_requests_total)) + ) <= bool stddev(http_requests_total) + ## Binary operator precedence The following list shows the precedence of binary operators in Prometheus, from diff --git a/docs/querying/remote_read_api.md b/docs/querying/remote_read_api.md index e3dd133069..efbd08e984 100644 --- a/docs/querying/remote_read_api.md +++ b/docs/querying/remote_read_api.md @@ -5,63 +5,7 @@ sort_rank: 7 # Remote Read API -This is not currently considered part of the stable API and is subject to change -even between non-major version releases of Prometheus. - -## Format overview - -The API response format is JSON. Every successful API request returns a `2xx` -status code. - -Invalid requests that reach the API handlers return a JSON error object -and one of the following HTTP response codes: - -- `400 Bad Request` when parameters are missing or incorrect. -- `422 Unprocessable Entity` when an expression can't be executed - ([RFC4918](https://tools.ietf.org/html/rfc4918#page-78)). -- `503 Service Unavailable` when queries time out or abort. - -Other non-`2xx` codes may be returned for errors occurring before the API -endpoint is reached. - -An array of warnings may be returned if there are errors that do 
All of the data that was successfully -collected will be returned in the data field. - -The JSON response envelope format is as follows: - -``` -{ - "status": "success" | "error", - "data": , - - // Only set if status is "error". The data field may still hold - // additional data. - "errorType": "", - "error": "", - - // Only if there were warnings while executing the request. - // There will still be data in the data field. - "warnings": [""] -} -``` - -Generic placeholders are defined as follows: - -* ``: Input timestamps may be provided either in -[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp -in seconds, with optional decimal places for sub-second precision. Output -timestamps are always represented as Unix timestamps in seconds. -* ``: Prometheus [time series -selectors](basics.md#time-series-selectors) like `http_requests_total` or -`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded. -* ``: [Prometheus duration strings](basics.md#time_durations). -For example, `5m` refers to a duration of 5 minutes. -* ``: boolean values (strings `true` and `false`). - -Note: Names of query parameters that may be repeated end with `[]`. - -## Remote Read API +> This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus. This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression. The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto). @@ -79,5 +23,3 @@ This returns a message that includes a list of raw samples. These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second. - - diff --git a/docs/storage.md b/docs/storage.md index b4c5b6adad..947960fe12 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -61,8 +61,11 @@ A Prometheus server's data directory looks something like this: Note that a limitation of local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of drive or node outages and should be managed like any other single node -database. The use of RAID is suggested for storage availability, and -[snapshots](querying/api.md#snapshot) are recommended for backups. With proper +database. + +[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups +made without snapshots run the risk of losing data that was recorded since +the last WAL sync, which typically happens every two hours. With proper architecture, it is possible to retain years of data in local storage. Alternatively, external storage may be used via the @@ -84,8 +87,10 @@ or 31 days, whichever is smaller. Prometheus has several flags that configure local storage. The most important are: - `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. -- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`. - Overrides `storage.tsdb.retention` if this flag is set to anything other than default. +- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is + set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention` + nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`. 
+ Supported units: y, w, d, h, m, s, ms. - `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only @@ -195,6 +200,9 @@ or time-series database to Prometheus. To do so, the user must first convert the source data into [OpenMetrics](https://openmetrics.io/) format, which is the input format for the backfilling as described below. +Note that native histograms and staleness markers are not supported by this +procedure, as they cannot be represented in the OpenMetrics format. + ### Usage Backfilling can be used via the Promtool command line. Promtool will write the blocks diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index bfbca7b70c..8ccbafe6f1 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target // since the service may be registered remotely through a different node. var addr string if node.ServiceAddress != "" { - addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort)) + addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort)) } else { - addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort)) + addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort)) } target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)} diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md index 9748c448db..739cf3be36 100644 --- a/documentation/examples/remote_storage/example_write_adapter/README.md +++ b/documentation/examples/remote_storage/example_write_adapter/README.md @@ -7,6 +7,7 @@ To use it: ``` go build + ./example_write_adapter ``` @@ -15,10 +16,19 @@ go build ```yaml remote_write: - url: "http://localhost:1234/receive" + protobuf_message: "io.prometheus.write.v2.Request" ``` -Then start Prometheus: +or, for the deprecated Remote Write 1.0 message: + +```yaml +remote_write: + - url: "http://localhost:1234/receive" + protobuf_message: "prometheus.WriteRequest" +``` + +Then start Prometheus (in a separate terminal): ``` -./prometheus +./prometheus --enable-feature=metadata-wal-records ``` diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go index 48c0a9571f..727a3056d3 100644 --- a/documentation/examples/remote_storage/example_write_adapter/server.go +++ b/documentation/examples/remote_storage/example_write_adapter/server.go @@ -18,44 +18,103 @@ import ( "log" "net/http" - "github.com/prometheus/common/model" - + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage/remote" ) func main() { http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) { - req, err := remote.DecodeWriteRequest(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + enc := r.Header.Get("Content-Encoding") + if enc == "" { + http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType) + return 
+ } + if enc != "snappy" { + http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType) return } - for _, ts := range req.Timeseries { - m := make(model.Metric, len(ts.Labels)) - for _, l := range ts.Labels { - m[model.LabelName(l.Name)] = model.LabelValue(l.Value) - } - fmt.Println(m) + contentType := r.Header.Get("Content-Type") + if contentType == "" { + http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType) + return + } - for _, s := range ts.Samples { - fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) - } + defer func() { _ = r.Body.Close() }() - for _, e := range ts.Exemplars { - m := make(model.Metric, len(e.Labels)) - for _, l := range e.Labels { - m[model.LabelName(l.Name)] = model.LabelValue(l.Value) - } - fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp) + // Very simplistic content parsing, see + // storage/remote/write_handler.go#WriteHandler.ServeHTTP for production example. + switch contentType { + case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest": + req, err := remote.DecodeWriteRequest(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return } - - for _, hp := range ts.Histograms { - h := remote.HistogramProtoToHistogram(hp) - fmt.Printf("\tHistogram: %s\n", h.String()) + printV1(req) + case "application/x-protobuf;proto=io.prometheus.write.v2.Request": + req, err := remote.DecodeWriteV2Request(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return } + printV2(req) + default: + msg := fmt.Sprintf("Unknown remote write content type: %s", contentType) + fmt.Println(msg) + http.Error(w, msg, http.StatusBadRequest) } }) - log.Fatal(http.ListenAndServe(":1234", nil)) } + +func printV1(req *prompb.WriteRequest) { + b := labels.NewScratchBuilder(0) + for _, ts := range req.Timeseries { + fmt.Println(ts.ToLabels(&b, nil)) + + for _, s := range ts.Samples { + fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) + } + for _, ep := range ts.Exemplars { + e := ep.ToExemplar(&b, nil) + fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp) + } + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + h := hp.ToFloatHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + continue + } + h := hp.ToIntHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + } + } +} + +func printV2(req *writev2.Request) { + b := labels.NewScratchBuilder(0) + for _, ts := range req.Timeseries { + l := ts.ToLabels(&b, req.Symbols) + m := ts.ToMetadata(req.Symbols) + fmt.Println(l, m) + + for _, s := range ts.Samples { + fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) + } + for _, ep := range ts.Exemplars { + e := ep.ToExemplar(&b, req.Symbols) + fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp) + } + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + h := hp.ToFloatHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + continue + } + h := hp.ToIntHistogram() + fmt.Printf("\tHistogram: %s\n", h.String()) + } + } +} diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index dff988131b..2491bbe2db 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,70 +8,67 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.5 - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/common v0.53.0 - 
github.com/prometheus/prometheus v0.51.1 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/common v0.55.0 + github.com/prometheus/prometheus v0.52.1 github.com/stretchr/testify v1.9.0 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aws/aws-sdk-go v1.50.32 // indirect + github.com/aws/aws-sdk-go v1.53.16 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect - github.com/hashicorp/go-version v1.6.0 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/collector/featuregate v1.3.0 // indirect - go.opentelemetry.io/collector/pdata v1.3.0 // indirect - go.opentelemetry.io/collector/semconv v0.96.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.opentelemetry.io/collector/pdata v1.8.0 // indirect + go.opentelemetry.io/collector/semconv v0.101.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect + go.opentelemetry.io/otel v1.27.0 // 
indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect - google.golang.org/grpc v1.62.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apimachinery v0.29.2 // indirect - k8s.io/client-go v0.29.2 // indirect + k8s.io/apimachinery v0.29.3 // indirect + k8s.io/client-go v0.29.3 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect ) @@ -83,4 +80,10 @@ exclude ( cloud.google.com/go v0.34.0 cloud.google.com/go v0.65.0 cloud.google.com/go v0.82.0 + + // Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules. + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 ) + +// TODO(bwplotka): Move to main branch commit or perhaps released version. +replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index b145f362f4..9898d75d70 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,17 +1,17 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 
h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= -github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= +github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= @@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY= -github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= +github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -35,10 +35,10 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go 
v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= -github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= +github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw= +github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= -github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -68,8 +68,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -95,16 +95,16 @@ 
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= -github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= -github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= +github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= +github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -115,10 +115,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -137,40 +135,38 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= -github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= 
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= +github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= -github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= +github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= -github.com/hashicorp/nomad/api 
v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= +github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= +github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= -github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= +github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= +github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI= @@ -194,8 +190,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -210,21 +206,23 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4= -github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk= +github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= +github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty 
v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= +github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -243,8 +241,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= -github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= +github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -258,33 +256,33 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/prometheus v0.51.1 h1:V2e7x2oiUC0Megp26+xjffxBf9EGkyP1iQuGd4VjUSU= -github.com/prometheus/prometheus v0.51.1/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc= +github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -306,21 +304,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8 github.com/xhit/go-str2duration/v2 v2.1.0/go.mod 
h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/collector/featuregate v1.3.0 h1:nrFSx+zfjdisjE9oCx25Aep3nJ9RaUjeE1qFL6eovoU= -go.opentelemetry.io/collector/featuregate v1.3.0/go.mod h1:mm8+xyQfgDmqhyegZRNIQmoKsNnDTwWKFLsdMoXAb7A= -go.opentelemetry.io/collector/pdata v1.3.0 h1:JRYN7tVHYFwmtQhIYbxWeiKSa2L1nCohyAs8sYqKFZo= -go.opentelemetry.io/collector/pdata v1.3.0/go.mod h1:t7W0Undtes53HODPdSujPLTnfSR5fzT+WpL+RTaaayo= -go.opentelemetry.io/collector/semconv v0.96.0 h1:DrZy8BpzJDnN2zFxXRj6BhfGYxNlqpFHBqyuS9fVHRY= -go.opentelemetry.io/collector/semconv v0.96.0/go.mod h1:zOm/U3pgMIWcvrcnPbR9Xx2HinoXj46ERMK8PUV9wrs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -331,16 +326,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -353,24 +346,21 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -385,48 +375,38 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -434,9 +414,8 @@ 
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -457,16 +436,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= -k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= -k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= -k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= -k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet index 508d89c244..563daab801 100644 --- a/documentation/prometheus-mixin/alerts.libsonnet +++ b/documentation/prometheus-mixin/alerts.libsonnet @@ -34,6 +34,20 @@ description: 
'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config, }, }, + { + alert: 'PrometheusKubernetesListWatchFailures', + expr: ||| + increase(prometheus_sd_kubernetes_failures_total{%(prometheusSelector)s}[5m]) > 0 + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Requests in Kubernetes SD are failing.', + description: 'Kubernetes service discovery of Prometheus %(prometheusName)s is experiencing {{ printf "%%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.' % $._config, + }, + }, { alert: 'PrometheusNotificationQueueRunningFull', expr: ||| diff --git a/documentation/prometheus-mixin/config.libsonnet b/documentation/prometheus-mixin/config.libsonnet index ab9079a5e3..70d46a2212 100644 --- a/documentation/prometheus-mixin/config.libsonnet +++ b/documentation/prometheus-mixin/config.libsonnet @@ -44,5 +44,10 @@ // The default refresh time for all dashboards, default to 60s refresh: '60s', }, + + // Opt-out of multi-cluster dashboards by overriding this. + showMultiCluster: true, + // The cluster label to infer the cluster name from. + clusterLabel: 'cluster', }, } diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index efe53dbac9..2bdd168cc9 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -10,21 +10,32 @@ local template = grafana.template; { grafanaDashboards+:: { 'prometheus.json': - g.dashboard( + local showMultiCluster = $._config.showMultiCluster; + local dashboard = g.dashboard( '%(prefix)sOverview' % $._config.grafanaPrometheus - ) - .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'cluster') - .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job') - .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance') + ); + local templatedDashboard = if showMultiCluster then + dashboard + .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel) + .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job') + .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance') + else + dashboard + .addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job') + .addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance'); + templatedDashboard .addRow( g.row('Prometheus Stats') .addPanel( g.panel('Prometheus Stats') + - g.tablePanel([ + g.tablePanel(if showMultiCluster then [ 'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', 'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', + ] else [ + 'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})', + 'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})', ], { - cluster: { alias: 'Cluster' }, + cluster: { alias: if showMultiCluster then 'Cluster' else '' }, job: { alias: 'Job' }, instance: { alias: 'Instance' }, version: { alias: 'Version' }, @@ -37,12 +48,18 @@ local template = grafana.template; g.row('Discovery') .addPanel( g.panel('Target Sync') + - 
g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3', '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}') + + g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3' + else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3', + if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}' + else '{{scrape_job}}') + { yaxes: g.yaxes('ms') } ) .addPanel( g.panel('Targets') + - g.queryPanel('sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})', '{{cluster}}:{{job}}:{{instance}}') + + g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})' + else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})', + if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}' + else 'Targets') + g.stack ) ) @@ -50,29 +67,47 @@ local template = grafana.template; g.row('Retrieval') .addPanel( g.panel('Average Scrape Interval Duration') + - g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{cluster}}:{{job}}:{{instance}} {{interval}} configured') + + g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' + else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3', + if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured' + else '{{interval}} configured') + { yaxes: g.yaxes('ms') } ) .addPanel( g.panel('Scrape failures') + - g.queryPanel([ + g.queryPanel(if showMultiCluster then [ 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - ], [ + ] else [ + 'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))', + 'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))', + 'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))', + 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))', + 
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))', + ], if showMultiCluster then [ 'exceeded body size limit: {{cluster}} {{job}} {{instance}}', 'exceeded sample limit: {{cluster}} {{job}} {{instance}}', 'duplicate timestamp: {{cluster}} {{job}} {{instance}}', 'out of bounds: {{cluster}} {{job}} {{instance}}', 'out of order: {{cluster}} {{job}} {{instance}}', + ] else [ + 'exceeded body size limit: {{job}}', + 'exceeded sample limit: {{job}}', + 'duplicate timestamp: {{job}}', + 'out of bounds: {{job}}', + 'out of order: {{job}}', ]) + g.stack ) .addPanel( g.panel('Appended Samples') + - g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])', '{{cluster}} {{job}} {{instance}}') + + g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' + else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])', + if showMultiCluster then '{{cluster}} {{job}} {{instance}}' + else '{{job}} {{instance}}') + g.stack ) ) @@ -80,12 +115,18 @@ local template = grafana.template; g.row('Storage') .addPanel( g.panel('Head Series') + - g.queryPanel('prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head series') + + g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}', + if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series' + else '{{job}} {{instance}} head series') + g.stack ) .addPanel( g.panel('Head Chunks') + - g.queryPanel('prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head chunks') + + g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}', + if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks' + else '{{job}} {{instance}} head chunks') + g.stack ) ) @@ -93,12 +134,18 @@ local template = grafana.template; g.row('Query') .addPanel( g.panel('Query Rate') + - g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{cluster}} {{job}} {{instance}}') + + g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' + else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', + if showMultiCluster then '{{cluster}} {{job}} {{instance}}' + else '{{job}} {{instance}}') + g.stack, ) .addPanel( g.panel('Stage Duration') + - g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') + + g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' + else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3', + if showMultiCluster then '{{slice}}' + else '{{slice}}') + { yaxes: g.yaxes('ms') } + g.stack, ) diff 
diff --git a/go.mod b/go.mod
index 8136dbc7af..ce2f0714a0 100644
--- a/go.mod
+++ b/go.mod
@@ -3,20 +3,20 @@ module github.com/prometheus/prometheus
 go 1.21
 
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
-	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
-	github.com/Code-Hex/go-generics-cache v1.3.1
-	github.com/KimMachineGun/automemlimit v0.5.0
+	github.com/Code-Hex/go-generics-cache v1.5.1
+	github.com/KimMachineGun/automemlimit v0.6.1
 	github.com/alecthomas/kingpin/v2 v2.4.0
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
-	github.com/aws/aws-sdk-go v1.50.32
+	github.com/aws/aws-sdk-go v1.53.16
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
-	github.com/cespare/xxhash/v2 v2.2.0
+	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/dennwc/varint v1.0.0
-	github.com/digitalocean/godo v1.109.0
-	github.com/docker/docker v25.0.3+incompatible
+	github.com/digitalocean/godo v1.117.0
+	github.com/docker/docker v26.1.3+incompatible
 	github.com/edsrzf/mmap-go v1.1.0
 	github.com/envoyproxy/go-control-plane v0.12.0
 	github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -24,66 +24,66 @@ require (
 	github.com/fsnotify/fsnotify v1.7.0
 	github.com/go-kit/log v0.2.1
 	github.com/go-logfmt/logfmt v0.6.0
-	github.com/go-openapi/strfmt v0.22.2
+	github.com/go-openapi/strfmt v0.23.0
 	github.com/go-zookeeper/zk v1.0.3
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/google/go-cmp v0.6.0
-	github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7
+	github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
 	github.com/google/uuid v1.6.0
-	github.com/gophercloud/gophercloud v1.8.0
-	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
+	github.com/gophercloud/gophercloud v1.12.0
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.28.2
-	github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702
-	github.com/hetznercloud/hcloud-go/v2 v2.6.0
+	github.com/hashicorp/consul/api v1.29.1
+	github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
+	github.com/hetznercloud/hcloud-go/v2 v2.9.0
 	github.com/ionos-cloud/sdk-go/v6 v6.1.11
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.17.7
+	github.com/klauspost/compress v1.17.8
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-	github.com/linode/linodego v1.30.0
-	github.com/miekg/dns v1.1.58
+	github.com/linode/linodego v1.35.0
+	github.com/miekg/dns v1.1.59
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
 	github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
 	github.com/oklog/run v1.1.0
 	github.com/oklog/ulid v1.3.1
-	github.com/ovh/go-ovh v1.4.3
+	github.com/ovh/go-ovh v1.5.1
 	github.com/prometheus/alertmanager v0.27.0
-	github.com/prometheus/client_golang v1.19.0
-	github.com/prometheus/client_model v0.6.0
-	github.com/prometheus/common v0.49.1-0.20240306132007-4199f18c3e92
+	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/client_model v0.6.1
+	github.com/prometheus/common v0.54.0
 	github.com/prometheus/common/assets v0.2.0
 	github.com/prometheus/common/sigv4 v0.1.0
 	github.com/prometheus/exporter-toolkit v0.11.0
-	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
 	github.com/stretchr/testify v1.9.0
 	github.com/vultr/govultr/v2 v2.17.2
-	go.opentelemetry.io/collector/featuregate v1.4.0
-	go.opentelemetry.io/collector/pdata v1.4.0
-	go.opentelemetry.io/collector/semconv v0.97.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
-	go.opentelemetry.io/otel v1.24.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0
-	go.opentelemetry.io/otel/sdk v1.24.0
-	go.opentelemetry.io/otel/trace v1.24.0
+	go.opentelemetry.io/collector/pdata v1.8.0
+	go.opentelemetry.io/collector/semconv v0.101.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
+	go.opentelemetry.io/otel v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
+	go.opentelemetry.io/otel/sdk v1.27.0
+	go.opentelemetry.io/otel/trace v1.27.0
 	go.uber.org/atomic v1.11.0
 	go.uber.org/automaxprocs v1.5.3
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
-	golang.org/x/net v0.22.0
-	golang.org/x/oauth2 v0.18.0
-	golang.org/x/sync v0.6.0
-	golang.org/x/sys v0.18.0
+	golang.org/x/net v0.26.0
+	golang.org/x/oauth2 v0.21.0
+	golang.org/x/sync v0.7.0
+	golang.org/x/sys v0.21.0
+	golang.org/x/text v0.16.0
 	golang.org/x/time v0.5.0
-	golang.org/x/tools v0.19.0
-	google.golang.org/api v0.168.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8
-	google.golang.org/grpc v1.62.1
-	google.golang.org/protobuf v1.33.0
+	golang.org/x/tools v0.22.0
+	google.golang.org/api v0.183.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
+	google.golang.org/grpc v1.64.0
+	google.golang.org/protobuf v1.34.1
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.29.3
@@ -94,17 +94,18 @@ require (
 )
 
 require (
-	cloud.google.com/go/compute v1.23.4 // indirect
-	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
+	cloud.google.com/go/auth v0.5.1 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+	cloud.google.com/go/compute/metadata v0.3.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cilium/ebpf v0.11.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect
+	github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -114,23 +115,23 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/fatih/color v1.15.0 // indirect
+	github.com/fatih/color v1.16.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-kit/kit v0.12.0 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.22.2 // indirect
-	github.com/go-openapi/errors v0.21.1 // indirect
+	github.com/go-openapi/errors v0.22.0 // indirect
 	github.com/go-openapi/jsonpointer v0.20.2 // indirect
 	github.com/go-openapi/jsonreference v0.20.4 // indirect
 	github.com/go-openapi/loads v0.21.5 // indirect
 	github.com/go-openapi/spec v0.20.14 // indirect
 	github.com/go-openapi/swag v0.22.9 // indirect
 	github.com/go-openapi/validate v0.23.0 // indirect
-	github.com/go-resty/resty/v2 v2.11.0 // indirect
+	github.com/go-resty/resty/v2 v2.13.1 // indirect
 	github.com/godbus/dbus/v5 v5.0.4 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/glog v1.2.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
@@ -139,18 +140,17 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.2 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-hclog v1.5.0 // indirect
+	github.com/hashicorp/go-hclog v1.6.3 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
-	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
@@ -161,9 +161,10 @@ require (
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -182,19 +183,17 @@ require (
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.24.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
+	go.opentelemetry.io/otel/metric v1.27.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.16.0 // indirect
-	golang.org/x/term v0.18.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect
+	golang.org/x/mod v0.18.0 // indirect
+	golang.org/x/term v0.21.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
-	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
diff --git a/go.sum b/go.sum
index 79c133682e..956b9d8949 100644
--- a/go.sum
+++ b/go.sum
@@ -12,16 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
+cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
-cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -34,14 +36,14 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= @@ -50,15 +52,15 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1. 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= -github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= +github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/KimMachineGun/automemlimit v0.5.0 h1:BeOe+BbJc8L5chL3OwzVYjVzyvPALdd5wxVVOWuUZmQ= -github.com/KimMachineGun/automemlimit v0.5.0/go.mod h1:di3GCKiu9Y+1fs92erCbUvKzPkNyViN3mA0vti/ykEQ= +github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8= +github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= @@ -90,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY= -github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= +github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= @@ -102,12 +104,12 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/casbin/casbin/v2 v2.1.2/go.mod 
h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -118,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= @@ -141,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= -github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= +github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw= +github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 
h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= -github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -178,8 +180,8 @@ github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -214,8 +216,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= -github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI= -github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= @@ -224,14 +226,14 @@ github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRz github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= -github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw= -github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.22.9 
h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= -github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= -github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= +github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= +github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= @@ -246,8 +248,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= @@ -278,8 +280,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240528025155-186aa0362fba 
h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g= +github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= -github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= -github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= +github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -343,21 +343,23 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 
h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= -github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= +github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= +github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= +github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= +github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= -github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= +github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= +github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -367,9 +369,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -381,8 +382,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod 
h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -408,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= -github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= +github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= +github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= -github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= +github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= +github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -453,8 +454,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -471,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= 
-github.com/linode/linodego v1.30.0 h1:6HJli+LX7NGu+Sne2G+ux790EkVOWOV/SR4mK3jcs6k= -github.com/linode/linodego v1.30.0/go.mod h1:/46h/XpmWi//oSA92GX2p3FIxb8HbX7grslPPQalR2o= +github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= +github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -490,8 +491,8 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= @@ -499,8 +500,8 @@ github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwU github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= +github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -515,6 +516,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -570,8 +573,8 @@ 
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= -github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= +github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -605,16 +608,16 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -622,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common 
v0.49.1-0.20240306132007-4199f18c3e92 h1:nuwTDY/15McImfuXcUD6AA3alpUNEXfWws8K/8SXr68= -github.com/prometheus/common v0.49.1-0.20240306132007-4199f18c3e92/go.mod h1:Kxm+EULxRbUkjGU6WFsQqo3ORzB4tyKvlWFOE9mB2sE= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -642,13 +645,13 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= @@ -720,30 +723,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/featuregate v1.4.0 h1:RWE9M659C9iuUQc4GzBsndkGHG1jIzIY+nZJWvcKy1M= -go.opentelemetry.io/collector/featuregate v1.4.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= -go.opentelemetry.io/collector/pdata v1.4.0 h1:cA6Pr7Z2V7mE+i7FmYpavX7nefzd6H4CICgW0T9aJX0= -go.opentelemetry.io/collector/pdata v1.4.0/go.mod h1:0Ttp4wQinhV5oJTd9MjyvUegmZBO9O0nrlh/+EDLw+Q= -go.opentelemetry.io/collector/semconv v0.97.0 h1:iF3nTfThbiOwz7o5Pocn0dDnDoffd18ijDuf6Mwzi1s= -go.opentelemetry.io/collector/semconv v0.97.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= 
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -770,9 +771,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -807,8 +809,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -852,17 +854,18 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= 
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -876,8 +879,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -941,16 +944,18 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= 
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -958,17 +963,16 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1021,8 +1025,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.19.0 
h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1042,8 +1046,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.168.0 h1:MBRe+Ki4mMN93jhDDbpuRLjRddooArz4FeSObvUMmjY= -google.golang.org/api v0.168.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1051,8 +1055,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1082,10 +1084,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 
h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1104,8 +1106,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1117,9 +1119,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1167,8 +1168,8 @@ k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi 
v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 19a92b3d5a..2a37ea66d4 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -30,11 +30,12 @@ import ( type FloatHistogram struct { // Counter reset information. CounterResetHint CounterResetHint - // Currently valid schema numbers are -4 <= n <= 8. They are all for - // base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. Or - // in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -49,6 +50,16 @@ type FloatHistogram struct { // Observation counts in buckets. Each represents an absolute count and // must be zero or positive. PositiveBuckets, NegativeBuckets []float64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 +} + +func (h *FloatHistogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) } // Copy returns a deep copy of the Histogram. 
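The struct changes above introduce `CustomValues` and the `UsesCustomBuckets` helper. A minimal sketch (not part of the diff) of how a caller might build and check a custom-buckets histogram, assuming the `github.com/prometheus/prometheus/model/histogram` import path and the `CustomBucketsSchema` constant this change relies on; the bucket data mirrors a case from `TestFloatCustomBucketsIterators` further down:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Five explicit upper bounds replace the exponential bucket layout.
	// Spans index into this bound list just as they index exponential
	// buckets; index 5 (== len(CustomValues)) is the (500, +Inf) bucket.
	h := &histogram.FloatHistogram{
		Schema: histogram.CustomBucketsSchema,
		Count:  622,
		Sum:    10008.4,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 1, Length: 1},
			{Offset: 1, Length: 1},
		},
		PositiveBuckets: []float64{100, 344, 123, 55},
		CustomValues:    []float64{10, 25, 50, 100, 500}, // strictly increasing
	}

	fmt.Println(h.UsesCustomBuckets()) // true
	fmt.Println(h.Validate())          // <nil>: spans, buckets and bounds are consistent
}
```

Note that the zero-bucket and negative-side fields stay at their zero values; per the `Validate` changes later in this diff, anything else is rejected for this schema.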
@@ -56,28 +67,37 @@ func (h *FloatHistogram) Copy() *FloatHistogram { c := FloatHistogram{ CounterResetHint: h.CounterResetHint, Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: h.ZeroCount, Count: h.Count, Sum: h.Sum, } + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + if len(h.PositiveSpans) != 0 { c.PositiveSpans = make([]Span, len(h.PositiveSpans)) copy(c.PositiveSpans, h.PositiveSpans) } - if len(h.NegativeSpans) != 0 { - c.NegativeSpans = make([]Span, len(h.NegativeSpans)) - copy(c.NegativeSpans, h.NegativeSpans) - } if len(h.PositiveBuckets) != 0 { c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) copy(c.PositiveBuckets, h.PositiveBuckets) } - if len(h.NegativeBuckets) != 0 { - c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) - copy(c.NegativeBuckets, h.NegativeBuckets) - } return &c } @@ -87,32 +107,53 @@ func (h *FloatHistogram) Copy() *FloatHistogram { func (h *FloatHistogram) CopyTo(to *FloatHistogram) { to.CounterResetHint = h.CounterResetHint to.Schema = h.Schema - to.ZeroThreshold = h.ZeroThreshold - to.ZeroCount = h.ZeroCount to.Count = h.Count to.Sum = h.Sum + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) copy(to.PositiveSpans, h.PositiveSpans) - to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) - copy(to.NegativeSpans, h.NegativeSpans) - to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) copy(to.PositiveBuckets, h.PositiveBuckets) - - to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) - copy(to.NegativeBuckets, h.NegativeBuckets) } // CopyToSchema works like Copy, but the returned deep copy has the provided // target schema, which must be ≤ the original schema (i.e. it must have a lower -// resolution). +// resolution). This method panics if a custom buckets schema is used in the +// receiving FloatHistogram or as the provided targetSchema. func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { if targetSchema == h.Schema { // Fast path. 
return h.Copy() } + if h.UsesCustomBuckets() { + panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema)) + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } if targetSchema > h.Schema { panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) } @@ -185,6 +226,9 @@ func (h *FloatHistogram) TestExpression() string { if m.ZeroThreshold != 0 { res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) } + if m.UsesCustomBuckets() { + res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) + } addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { @@ -210,14 +254,18 @@ func (h *FloatHistogram) TestExpression() string { return "{{" + strings.Join(res, " ") + "}}" } -// ZeroBucket returns the zero bucket. +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } return Bucket[float64]{ Lower: -h.ZeroThreshold, Upper: h.ZeroThreshold, LowerInclusive: true, UpperInclusive: true, Count: h.ZeroCount, + // Index is irrelevant for the zero bucket. } } @@ -263,9 +311,18 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { // // The method reconciles differences in the zero threshold and in the schema, and // changes them if needed. The other histogram will not be modified in any case. +// Adding is currently only supported between 2 exponential histograms, or between +// 2 custom buckets histograms with the exact same custom bounds. // // This method returns a pointer to the receiving histogram for convenience. -func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { +func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + switch { case other.CounterResetHint == h.CounterResetHint: // Adding apples to apples, all good. No need to change anything. 
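With `Add` (and `Sub` below) now returning an error, call sites change shape. A sketch of the new contract, assuming `ErrHistogramsIncompatibleSchema` and `ErrHistogramsIncompatibleBounds` are sentinel errors defined elsewhere in this package (the diff references but does not define them); the printed message matches the expectation added to `TestFloatHistogramAdd`:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	a := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           3,
		Sum:             5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{3},
		CustomValues:    []float64{1, 2, 3},
	}
	b := a.Copy()
	b.CustomValues = []float64{1, 2, 3, 4} // deliberately different bounds

	// On success, Add mutates and returns the receiver; on incompatible
	// layouts it now reports an error (before mutating anything) instead
	// of silently producing garbage.
	if _, err := a.Add(b); err != nil {
		fmt.Println(errors.Is(err, histogram.ErrHistogramsIncompatibleBounds)) // true
		fmt.Println(err)
	}
}
```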
@@ -290,19 +347,28 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } - otherZeroCount := h.reconcileZeroBuckets(other) - h.ZeroCount += otherZeroCount + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + } h.Count += other.Count h.Sum += other.Sum var ( - hPositiveSpans = h.PositiveSpans - hPositiveBuckets = h.PositiveBuckets - hNegativeSpans = h.NegativeSpans - hNegativeBuckets = h.NegativeBuckets - + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) @@ -321,24 +387,40 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) - return h + return h, nil } // Sub works like Add but subtracts the other histogram. -func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { - otherZeroCount := h.reconcileZeroBuckets(other) - h.ZeroCount -= otherZeroCount +func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + } h.Count -= other.Count h.Sum -= other.Sum var ( - hPositiveSpans = h.PositiveSpans - hPositiveBuckets = h.PositiveBuckets - hNegativeSpans = h.NegativeSpans - hNegativeBuckets = h.NegativeBuckets - + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) @@ -356,7 +438,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) - return h + return h, nil } // Equals returns true if the given float histogram matches exactly. 
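The `Equals` hunk that follows implements the rules described in its updated doc comment: fields that are unused under the histogram's schema are ignored, except where a mismatch could cause unintended behaviour. A small sketch mirroring the cases added to `TestFloatHistogramEquals` below:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Under an exponential schema, CustomValues is unused and ignored.
	a := &histogram.FloatHistogram{Schema: 0, Count: 10, Sum: 5}
	b := a.Copy()
	b.CustomValues = []float64{1, 2, 3}
	fmt.Println(a.Equals(b)) // true

	// Two custom-buckets histograms must agree on their bounds.
	c := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           3,
		Sum:             9.7,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{3},
		CustomValues:    []float64{1, 2, 3},
	}
	d := c.Copy()
	d.CustomValues = []float64{1, 2, 3, 4}
	fmt.Println(c.Equals(d)) // false
}
```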
@@ -365,29 +447,42 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { // but they must represent the same bucket layout to match. // Sum, Count, ZeroCount and bucket values are compared based on their bit patterns // because this method is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { if h2 == nil { return false } - if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) || + if h.Schema != h2.Schema || math.Float64bits(h.Count) != math.Float64bits(h2.Count) || math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } + if h.UsesCustomBuckets() { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || + math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) { + return false + } + + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { return false } - if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { - return false - } - - if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { - return false - } - if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { return false } @@ -403,6 +498,7 @@ func (h *FloatHistogram) Size() int { negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). + customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64). // Total size of the struct. @@ -417,9 +513,10 @@ func (h *FloatHistogram) Size() int { // fh.NegativeSpans is 24 bytes. // fh.PositiveBuckets is 24 bytes. // fh.NegativeBuckets is 24 bytes. - structSize := 144 + // fh.CustomValues is 24 bytes. + structSize := 168 - return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize } // Compact eliminates empty buckets at the beginning and end of each span, then @@ -504,6 +601,12 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { if h.Count < previous.Count { return true } + if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) { + // Mark that something has changed or that the application has been restarted. However, this does + // not matter so much since the change in schema will be handled directly in the chunks and PromQL + // functions. + return true + } if h.Schema > previous.Schema { return true } @@ -609,7 +712,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { // positive buckets in descending order (starting at the highest bucket and // going down towards the zero bucket). 
func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { - it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) return &it } @@ -617,7 +720,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] // negative buckets in ascending order (starting at the lowest bucket and going // up towards the zero bucket). func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { - it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) return &it } @@ -629,7 +732,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false), + leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil), rightIter: h.floatBucketIterator(true, 0, h.Schema), state: -1, } @@ -643,30 +746,52 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true), + leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues), rightIter: h.floatBucketIterator(false, 0, h.Schema), state: -1, } } // Validate validates consistency between span and bucket slices. Also, buckets are checked -// against negative values. +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. // We do not check for h.Count being at least as large as the sum of the // counts in the buckets because floating point precision issues can // create false positives here. 
func (h *FloatHistogram) Validate() error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return fmt.Errorf("negative side: %w", err) - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return fmt.Errorf("positive side: %w", err) - } var nCount, pCount float64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) - if err != nil { - return fmt.Errorf("negative side: %w", err) + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomValues != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) if err != nil { return fmt.Errorf("positive side: %w", err) } @@ -790,17 +915,25 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { // If positive is true, the returned iterator iterates through the positive // buckets, otherwise through the negative buckets. // -// If absoluteStartValue is < the lowest absolute value of any upper bucket -// boundary, the iterator starts with the first bucket. Otherwise, it will skip -// all buckets with an absolute value of their upper boundary ≤ -// absoluteStartValue. +// Only for exponential schemas, if absoluteStartValue is < the lowest absolute +// value of any upper bucket boundary, the iterator starts with the first bucket. +// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤ +// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and +// no buckets are skipped. // // targetSchema must be ≤ the schema of FloatHistogram (and of course within the // legal values for schemas in general). The buckets are merged to match the -// targetSchema prior to iterating (without mutating FloatHistogram). +// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets +// schemas cannot be merged with other schemas. 
func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, ) floatBucketIterator { + if h.UsesCustomBuckets() && targetSchema != h.Schema { + panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema")) + } + if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) { + panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema")) + } if targetSchema > h.Schema { panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) } @@ -816,6 +949,7 @@ func (h *FloatHistogram) floatBucketIterator( if positive { i.spans = h.PositiveSpans i.buckets = h.PositiveBuckets + i.customValues = h.CustomValues } else { i.spans = h.NegativeSpans i.buckets = h.NegativeBuckets @@ -825,14 +959,15 @@ func (h *FloatHistogram) floatBucketIterator( // reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. func newReverseFloatBucketIterator( - spans []Span, buckets []float64, schema int32, positive bool, + spans []Span, buckets []float64, schema int32, positive bool, customValues []float64, ) reverseFloatBucketIterator { r := reverseFloatBucketIterator{ baseBucketIterator: baseBucketIterator[float64, float64]{ - schema: schema, - spans: spans, - buckets: buckets, - positive: positive, + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customValues: customValues, }, } @@ -946,9 +1081,9 @@ func (i *floatBucketIterator) Next() bool { } } - // Skip buckets before absoluteStartValue. + // Skip buckets before absoluteStartValue for exponential schemas. // TODO(beorn7): Maybe do something more efficient than this recursive call. - if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue { return i.Next() } i.boundReachedStartValue = true @@ -1010,14 +1145,7 @@ func (i *allFloatBucketIterator) Next() bool { case 0: i.state = 1 if i.h.ZeroCount > 0 { - i.currBucket = Bucket[float64]{ - Lower: -i.h.ZeroThreshold, - Upper: i.h.ZeroThreshold, - LowerInclusive: true, - UpperInclusive: true, - Count: i.h.ZeroCount, - // Index is irrelevant for the zero bucket. - } + i.currBucket = i.h.ZeroBucket() return true } return i.Next() @@ -1076,7 +1204,7 @@ func addBuckets( for _, spanB := range spansB { indexB += spanB.Offset for j := 0; j < int(spanB.Length); j++ { - if lowerThanThreshold && getBound(indexB, schema) <= threshold { + if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold { goto nextLoop } lowerThanThreshold = false @@ -1177,7 +1305,7 @@ func addBuckets( return spansA, bucketsA } -func floatBucketsMatch(b1, b2 []float64) bool { +func FloatBucketsMatch(b1, b2 []float64) bool { if len(b1) != len(b2) { return false } @@ -1191,7 +1319,15 @@ func floatBucketsMatch(b1, b2 []float64) bool { // ReduceResolution reduces the float histogram's spans, buckets into target schema. // The target schema must be smaller than the current float histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. 
func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } if targetSchema >= h.Schema { panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) } diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 49fb77ab0b..1558a6d679 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -14,9 +14,9 @@ package histogram import ( - "fmt" "math" "math/rand" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -131,6 +131,46 @@ func TestFloatHistogramMul(t *testing.T) { NegativeBuckets: []float64{9, 3, 15, 18}, }, }, + { + "no-op with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + 1, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + }, + { + "triple with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 3, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 90, + Sum: 69, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{3, 0, 9, 12, 21}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, } for _, c := range cases { @@ -178,6 +218,21 @@ func TestFloatHistogramCopy(t *testing.T) { }, expected: &FloatHistogram{}, }, + { + name: "with custom buckets", + orig: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + expected: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + }, } for _, tcase := range cases { @@ -228,6 +283,21 @@ func TestFloatHistogramCopyTo(t *testing.T) { }, expected: &FloatHistogram{}, }, + { + name: "with custom buckets", + orig: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + expected: &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{1, 3, -3, 42}, + CustomValues: []float64{1, 2, 3}, + }, + }, } for _, tcase := range cases { @@ -339,6 +409,46 @@ func TestFloatHistogramDiv(t *testing.T) { NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, }, }, + { + "no-op with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + 1, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + }, + { + "half with custom buckets", + &FloatHistogram{ + Schema: 
CustomBucketsSchema, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 2, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 11.5, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, } for _, c := range cases { @@ -937,6 +1047,138 @@ func TestFloatHistogramDetectReset(t *testing.T) { }, true, }, + { + "no buckets to some buckets with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + CustomValues: []float64{1, 2, 3}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + false, + }, + { + "some buckets to no buckets with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + CustomValues: []float64{1, 2, 3}, + }, + true, + }, + { + "one bucket appears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one bucket disappears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + true, + }, + { + "an unpopulated bucket disappears, nothing else changes with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one positive bucket goes up with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.3, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + false, + }, + { + "one positive bucket goes down with custom bounds", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 
2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.1, 0.1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + true, + }, } for _, c := range cases { @@ -1230,6 +1472,70 @@ func TestFloatHistogramCompact(t *testing.T) { NegativeBuckets: []float64{2, 3}, }, }, + { + "nothing should happen with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + CustomValues: []float64{1, 2, 3}, + }, + }, + { + "eliminate zero offsets with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 5}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, + { + "eliminate multiple zero length spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 2}, {9, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, + { + "cut empty buckets at start and end with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 4}, {5, 6}}, + PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3, 0, 0, 0}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + 0, + &FloatHistogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{2, 2}, {5, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + }, } for _, c := range cases { @@ -1245,6 +1551,7 @@ func TestFloatHistogramAdd(t *testing.T) { cases := []struct { name string in1, in2, expected *FloatHistogram + expErrMsg string }{ { "same bucket layout", @@ -1278,6 +1585,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 2}, {3, 2}}, NegativeBuckets: []float64{4, 2, 9, 10}, }, + "", }, { "same bucket layout, defined differently", @@ -1311,6 +1619,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 5}, {0, 2}}, NegativeBuckets: []float64{4, 2, 0, 0, 0, 9, 10}, }, + "", }, { "non-overlapping spans", @@ -1344,9 +1653,10 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, + "", }, { - "non-overlapping inverted order", + "non-overlapping spans inverted order", &FloatHistogram{ ZeroThreshold: 0.01, ZeroCount: 8, @@ -1377,6 +1687,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{-6, 2}, {1, 2}, {4, 2}, {3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, + "", }, { "overlapping spans", @@ -1410,6 +1721,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, 
NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "overlapping spans inverted order", @@ -1443,6 +1755,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change", @@ -1478,6 +1791,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero bucket in first histogram", @@ -1511,6 +1825,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero bucket in second histogram", @@ -1544,6 +1859,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero threshold in first histogram ends up inside a populated bucket of second histogram", @@ -1577,6 +1893,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "larger zero threshold in second histogram ends up inside a populated bucket of first histogram", @@ -1610,6 +1927,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change combined with larger zero bucket in second histogram", @@ -1645,6 +1963,7 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", }, { "schema change combined with larger zero bucket in first histogram", @@ -1680,36 +1999,261 @@ func TestFloatHistogramAdd(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, }, + "", + }, + { + "same custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 26, + Sum: 3.579, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "same custom bucket layout, defined differently", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 2}, {0, 1}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 26, + Sum: 3.579, + PositiveSpans: []Span{{0, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "non-overlapping spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 20, + Sum: 
1.234, + PositiveSpans: []Span{{2, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 35, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "non-overlapping spans inverted order with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 20, + Sum: 1.234, + PositiveSpans: []Span{{2, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 35, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "overlapping spans with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 27, + Sum: 1.234, + PositiveSpans: []Span{{1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 42, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "overlapping spans inverted order with custom buckets", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 27, + Sum: 1.234, + PositiveSpans: []Span{{1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 42, + Sum: 3.579, + PositiveSpans: []Span{{0, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "different custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 2.345, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4, 5}, + }, + nil, + "cannot apply this operation on custom buckets histograms with different custom bounds", + }, + { + "mix exponential and custom buckets histograms", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 59, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 7, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{4, 10, 1, 4, 14, 7}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + 
CustomValues: []float64{1, 2, 3, 4}, + }, + nil, + "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - testHistogramAdd(t, c.in1, c.in2, c.expected) - testHistogramAdd(t, c.in2, c.in1, c.expected) + testHistogramAdd(t, c.in1, c.in2, c.expected, c.expErrMsg) + testHistogramAdd(t, c.in2, c.in1, c.expected, c.expErrMsg) }) } } -func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram) { +func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram, expErrMsg string) { var ( aCopy = a.Copy() bCopy = b.Copy() - expectedCopy = expected.Copy() + expectedCopy *FloatHistogram ) - res := aCopy.Add(bCopy) + if expected != nil { + expectedCopy = expected.Copy() + } - res.Compact(0) - expectedCopy.Compact(0) + res, err := aCopy.Add(bCopy) + if expErrMsg != "" { + require.EqualError(t, err, expErrMsg) + } else { + require.NoError(t, err) + } - require.Equal(t, expectedCopy, res) + if expected != nil { + res.Compact(0) + expectedCopy.Compact(0) - // Has it also happened in-place? - require.Equal(t, expectedCopy, aCopy) + require.Equal(t, expectedCopy, res) - // Check that the argument was not mutated. - require.Equal(t, b, bCopy) + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) + } } func TestFloatHistogramSub(t *testing.T) { @@ -1718,6 +2262,7 @@ func TestFloatHistogramSub(t *testing.T) { cases := []struct { name string in1, in2, expected *FloatHistogram + expErrMsg string }{ { "same bucket layout", @@ -1751,6 +2296,7 @@ func TestFloatHistogramSub(t *testing.T) { NegativeSpans: []Span{{3, 2}, {3, 2}}, NegativeBuckets: []float64{2, 0, 1, 2}, }, + "", }, { "schema change", @@ -1786,38 +2332,126 @@ func TestFloatHistogramSub(t *testing.T) { NegativeSpans: []Span{{3, 3}, {1, 3}}, NegativeBuckets: []float64{1, 9, 1, 4, 9, 1}, }, + "", + }, + { + "same custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 4, + Sum: 11, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 1, 1, 1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + "", + }, + { + "different custom bucket layout", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 15, + Sum: 23, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + CustomValues: []float64{1, 2, 3, 4}, + }, + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4, 5}, + }, + nil, + "cannot apply this operation on custom buckets histograms with different custom bounds", + }, + { + "mix exponential and custom buckets histograms", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 59, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 7, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{4, 10, 1, 4, 14, 7}, + }, + 
&FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 11, + Sum: 12, + PositiveSpans: []Span{{0, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + CustomValues: []float64{1, 2, 3, 4}, + }, + nil, + "cannot apply this operation on histograms with a mix of exponential and custom bucket schemas", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - testFloatHistogramSub(t, c.in1, c.in2, c.expected) + testFloatHistogramSub(t, c.in1, c.in2, c.expected, c.expErrMsg) - expectedNegative := c.expected.Copy().Mul(-1) - testFloatHistogramSub(t, c.in2, c.in1, expectedNegative) + var expectedNegative *FloatHistogram + if c.expected != nil { + expectedNegative = c.expected.Copy().Mul(-1) + } + testFloatHistogramSub(t, c.in2, c.in1, expectedNegative, c.expErrMsg) }) } } -func testFloatHistogramSub(t *testing.T, a, b, expected *FloatHistogram) { +func testFloatHistogramSub(t *testing.T, a, b, expected *FloatHistogram, expErrMsg string) { var ( aCopy = a.Copy() bCopy = b.Copy() - expectedCopy = expected.Copy() + expectedCopy *FloatHistogram ) - res := aCopy.Sub(bCopy) + if expected != nil { + expectedCopy = expected.Copy() + } - res.Compact(0) - expectedCopy.Compact(0) + res, err := aCopy.Sub(bCopy) + if expErrMsg != "" { + require.EqualError(t, err, expErrMsg) + } else { + require.NoError(t, err) + } - require.Equal(t, expectedCopy, res) + if expected != nil { + res.Compact(0) + expectedCopy.Compact(0) - // Has it also happened in-place? - require.Equal(t, expectedCopy, aCopy) + require.Equal(t, expectedCopy, res) - // Check that the argument was not mutated. - require.Equal(t, b, bCopy) + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) + } } func TestFloatHistogramCopyToSchema(t *testing.T) { @@ -1878,6 +2512,26 @@ func TestFloatHistogramCopyToSchema(t *testing.T) { NegativeBuckets: []float64{3, 1, 5, 6}, }, }, + { + "no schema change for custom buckets", + CustomBucketsSchema, + &FloatHistogram{ + Count: 30, + Sum: 2.345, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7}, + }, + &FloatHistogram{ + Count: 30, + Sum: 2.345, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{0, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7}, + }, + }, } for _, c := range cases { @@ -2134,7 +2788,7 @@ func TestAllFloatBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { var expBuckets, actBuckets []Bucket[float64] if c.includeNeg { @@ -2360,7 +3014,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { var expBuckets, actBuckets []Bucket[float64] if c.includePos { @@ -2450,6 +3104,110 @@ func TestFloatBucketIteratorTargetSchema(t *testing.T) { require.False(t, it.Next(), "negative iterator not exhausted") } +func TestFloatCustomBucketsIterators(t *testing.T) { + cases := []struct { + h FloatHistogram + expPositiveBuckets []Bucket[float64] + }{ + { + h: FloatHistogram{ + Count: 622, + Sum: 10008.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{100, 344, 
123, 55}, + CustomValues: []float64{10, 25, 50, 100, 500}, + }, + expPositiveBuckets: []Bucket[float64]{ + {Lower: math.Inf(-1), Upper: 10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, + {Lower: 10, Upper: 25, LowerInclusive: false, UpperInclusive: true, Count: 344, Index: 1}, + {Lower: 50, Upper: 100, LowerInclusive: false, UpperInclusive: true, Count: 123, Index: 3}, + {Lower: 500, Upper: math.Inf(1), LowerInclusive: false, UpperInclusive: true, Count: 55, Index: 5}, + }, + }, + { + h: FloatHistogram{ + Count: 622, + Sum: 10008.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []float64{100, 344, 123, 55}, + CustomValues: []float64{-10, -5, 0, 10, 25}, + }, + expPositiveBuckets: []Bucket[float64]{ + {Lower: math.Inf(-1), Upper: -10, LowerInclusive: true, UpperInclusive: true, Count: 100, Index: 0}, + {Lower: -10, Upper: -5, LowerInclusive: false, UpperInclusive: true, Count: 344, Index: 1}, + {Lower: 0, Upper: 10, LowerInclusive: false, UpperInclusive: true, Count: 123, Index: 3}, + {Lower: 25, Upper: math.Inf(1), LowerInclusive: false, UpperInclusive: true, Count: 55, Index: 5}, + }, + }, + } + + for i, c := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + { + it := c.h.AllBucketIterator() + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "all bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "all bucket iterator not exhausted") + + it = c.h.AllReverseBucketIterator() + length := len(c.expPositiveBuckets) + for j := 0; j < length; j++ { + i := length - j - 1 + b := c.expPositiveBuckets[i] + require.True(t, it.Next(), "all reverse bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "all reverse bucket iterator not exhausted") + + it = c.h.PositiveBucketIterator() + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "positive bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive bucket iterator not exhausted") + + it = c.h.PositiveReverseBucketIterator() + for j := 0; j < length; j++ { + i := length - j - 1 + b := c.expPositiveBuckets[i] + require.True(t, it.Next(), "positive reverse bucket iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive reverse bucket iterator not exhausted") + + it = c.h.NegativeBucketIterator() + require.False(t, it.Next(), "negative bucket iterator not exhausted") + + it = c.h.NegativeReverseBucketIterator() + require.False(t, it.Next(), "negative reverse bucket iterator not exhausted") + } + { + it := c.h.floatBucketIterator(true, 0, CustomBucketsSchema) + for i, b := range c.expPositiveBuckets { + require.True(t, it.Next(), "positive iterator exhausted too early") + require.Equal(t, b, it.At(), "bucket %d", i) + } + require.False(t, it.Next(), "positive iterator not exhausted") + + it = c.h.floatBucketIterator(false, 0, CustomBucketsSchema) + require.False(t, it.Next(), "negative iterator not exhausted") + } + }) + } +} + // TestFloatHistogramEquals tests FloatHistogram with float-specific cases that // cannot be covered by TestHistogramEquals. 
func TestFloatHistogramEquals(t *testing.T) { @@ -2498,6 +3256,42 @@ func TestFloatHistogramEquals(t *testing.T) { hNegBucketNaN.NegativeBuckets[0] = math.NaN() notEquals(h1, *hNegBucketNaN) equals(*hNegBucketNaN, *hNegBucketNaN) + + // Custom bounds are defined for exponential schema. + hCustom := h1.Copy() + hCustom.CustomValues = []float64{1, 2, 3} + equals(h1, *hCustom) + + cbh1 := FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 2.2, + Sum: 9.7, + PositiveSpans: []Span{{0, 1}}, + PositiveBuckets: []float64{3}, + CustomValues: []float64{1, 2, 3}, + } + + require.NoError(t, cbh1.Validate()) + + cbh2 := cbh1.Copy() + equals(cbh1, *cbh2) + + // Has different custom bounds for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.CustomValues = []float64{1, 2, 3, 4} + notEquals(cbh1, *cbh2) + + // Has non-empty negative spans and buckets for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}} + cbh2.NegativeBuckets = []float64{1} + notEquals(cbh1, *cbh2) + + // Has non-zero zero count and threshold for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.ZeroThreshold = 0.1 + cbh2.ZeroCount = 10 + notEquals(cbh1, *cbh2) } func TestFloatHistogramSize(t *testing.T) { @@ -2519,8 +3313,9 @@ func TestFloatHistogramSize(t *testing.T) { PositiveBuckets: nil, // 24 bytes. NegativeSpans: nil, // 24 bytes. NegativeBuckets: nil, // 24 bytes. + CustomValues: nil, // 24 bytes. }, - 8 + 4 + 4 + 8 + 8 + 8 + 8 + 24 + 24 + 24 + 24, + 8 + 4 + 4 + 8 + 8 + 8 + 8 + 24 + 24 + 24 + 24 + 24, }, { "complete struct", @@ -2540,8 +3335,29 @@ func TestFloatHistogramSize(t *testing.T) { {3, 2}, // 2 * 4 bytes. {3, 2}}, // 2 * 4 bytes. NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, // 24 bytes + 4 * 8 bytes. + CustomValues: nil, // 24 bytes. }, - 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 2*4 + 2*4) + (24 + 4*8) + (24 + 4*8), + 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 2*4 + 2*4) + (24 + 4*8) + (24 + 4*8) + 24, + }, + { + "complete struct with custom buckets", + &FloatHistogram{ // 8 bytes. + CounterResetHint: 0, // 1 byte. + Schema: CustomBucketsSchema, // 4 bytes. + ZeroThreshold: 0, // 8 bytes. + ZeroCount: 0, // 8 bytes. + Count: 3493.3, // 8 bytes. + Sum: 2349209.324, // 8 bytes. + PositiveSpans: []Span{ // 24 bytes. + {0, 1}, // 2 * 4 bytes. + {2, 3}, // 2 * 4 bytes. + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, // 24 bytes + 4 * 8 bytes. + NegativeSpans: nil, // 24 bytes. + NegativeBuckets: nil, // 24 bytes. + CustomValues: []float64{1, 2, 3}, // 24 bytes + 3 * 8 bytes. 
+ }, + 8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 4*8) + 24 + 24 + (24 + 3*8), }, } @@ -2552,6 +3368,58 @@ func TestFloatHistogramSize(t *testing.T) { } } +func TestFloatHistogramString(t *testing.T) { + cases := []struct { + name string + fh *FloatHistogram + expected string + }{ + { + "exponential histogram", + &FloatHistogram{ + Schema: 1, + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{ + {-2, 1}, + {2, 3}, + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{ + {3, 2}, + {3, 2}, + }, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + `{count:3493.3, sum:2.349209324e+06, [-22.62741699796952,-16):1000, [-16,-11.31370849898476):123400, [-4,-2.82842712474619):3, [-2.82842712474619,-2):3.1, [-0.01,0.01]:5.5, (0.35355339059327373,0.5]:1, (1,1.414213562373095]:3.3, (1.414213562373095,2]:4.2, (2,2.82842712474619]:0.1}`, + }, + { + "custom buckets histogram", + &FloatHistogram{ + Schema: CustomBucketsSchema, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{ + {0, 1}, + {2, 4}, + }, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 5}, + CustomValues: []float64{1, 2, 5, 10, 15, 20}, + }, + `{count:3493.3, sum:2.349209324e+06, [-Inf,1]:1, (5,10]:3.3, (10,15]:4.2, (15,20]:0.1, (20,+Inf]:5}`, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.NoError(t, c.fh.Validate()) + require.Equal(t, c.expected, c.fh.String()) + }) + } +} + func BenchmarkFloatHistogramAllBucketIterator(b *testing.B) { rng := rand.New(rand.NewSource(0)) diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 7e1cc4b605..a36b58d069 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -20,14 +20,33 @@ import ( "strings" ) -var ( - ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") - ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") - ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") - ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") - ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") +const ( + ExponentialSchemaMax int32 = 8 + ExponentialSchemaMin int32 = -4 + CustomBucketsSchema int32 = -53 ) +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few") + ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order") + ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite") + ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix 
of exponential and custom bucket schemas") + ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") +) + +func IsCustomBucketsSchema(s int32) bool { + return s == CustomBucketsSchema +} + +func IsExponentialSchema(s int32) bool { + return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax +} + // BucketCount is a type constraint for the count in a bucket, which can be // float64 (for type FloatHistogram) or uint64 (for type Histogram). type BucketCount interface { @@ -115,6 +134,8 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { currCount IBC // Count in the current bucket. currIdx int32 // The actual bucket index. + + customValues []float64 // Bounds (usually upper) for histograms with custom buckets. } func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { @@ -128,14 +149,19 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { Index: b.currIdx, } if b.positive { - bucket.Upper = getBound(b.currIdx, schema) - bucket.Lower = getBound(b.currIdx-1, schema) + bucket.Upper = getBound(b.currIdx, schema, b.customValues) + bucket.Lower = getBound(b.currIdx-1, schema, b.customValues) } else { - bucket.Lower = -getBound(b.currIdx, schema) - bucket.Upper = -getBound(b.currIdx-1, schema) + bucket.Lower = -getBound(b.currIdx, schema, b.customValues) + bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues) + } + if IsCustomBucketsSchema(schema) { + bucket.LowerInclusive = b.currIdx == 0 + bucket.UpperInclusive = true + } else { + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 } - bucket.LowerInclusive = bucket.Lower < 0 - bucket.UpperInclusive = bucket.Upper > 0 return bucket } @@ -393,7 +419,55 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB return nil } -func getBound(idx, schema int32) float64 { +func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error { + prev := math.Inf(-1) + for _, curr := range bounds { + if curr <= prev { + return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid) + } + prev = curr + } + if prev == math.Inf(1) { + return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite) + } + + var spanBuckets int + var totalSpanLength int + for n, span := range spans { + if span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + totalSpanLength += int(span.Length) + int(span.Offset) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + if (len(bounds) + 1) < totalSpanLength { + return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch) + } + + return nil +} + +func getBound(idx, schema int32, customValues []float64) float64 { + if IsCustomBucketsSchema(schema) { + length := int32(len(customValues)) + switch { + case idx > length || idx < -1: + panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length)) + case idx == length: + return math.Inf(1) + case idx == -1: + return math.Inf(-1) + default: + return customValues[idx] + } + } + return getBoundExponential(idx, schema) +} + +func 
getBoundExponential(idx, schema int32) float64 { // Here a bit of context about the behavior for the last bucket counting // regular numbers (called simply "last bucket" below) and the bucket // counting observations of ±Inf (called "inf bucket" below, with an idx @@ -422,7 +496,7 @@ // bucket results in precisely that. It is either frac=1.0 & exp=1024 // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, // by the way, a power of two where the exponent itself is a power of - // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // two, 2¹⁰ in fact, which coincides with a bucket boundary in all // schemas.) So these are the special cases we have to catch below. if schema < 0 { exp := int(idx) << -schema @@ -703,3 +777,10 @@ func reduceResolution[IBC InternalBucketCount]( return targetSpans, targetBuckets } + +func clearIfNotNil[T any](items []T) []T { + if items == nil { + return nil + } + return items[:0] +} diff --git a/model/histogram/generic_test.go b/model/histogram/generic_test.go index b4d6585a8b..b49adbcadf 100644 --- a/model/histogram/generic_test.go +++ b/model/histogram/generic_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestGetBound(t *testing.T) { +func TestGetBoundExponential(t *testing.T) { scenarios := []struct { idx int32 schema int32 @@ -105,7 +105,7 @@ } for _, s := range scenarios { - got := getBound(s.idx, s.schema) + got := getBoundExponential(s.idx, s.schema) if s.want != got { require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema) } diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index d94cf98fa6..e4b99ec420 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -49,11 +49,12 @@ type Histogram struct { // Counter reset information. CounterResetHint CounterResetHint - // Currently valid schema numbers are -4 <= n <= 8. They are all for - // base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. Or - // in other words, each bucket boundary is the previous boundary times - // 2^(2^-n). + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. Schema int32 // Width of the zero bucket. ZeroThreshold float64 @@ -69,6 +70,12 @@ type Histogram struct { // count. All following ones are deltas relative to the previous // element. PositiveBuckets, NegativeBuckets []int64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 } // A Span defines a continuous sequence of buckets.
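To make the new custom-buckets layout concrete: with Schema set to CustomBucketsSchema (-53), the positive spans and buckets are interpreted against the explicit CustomValues upper bounds, the highest bucket is the implicit (last bound, +Inf] bucket, and the zero bucket and negative side are unused. A minimal sketch of building and iterating such a histogram through the API added in this diff; the specific bounds and counts are illustrative, not taken from the diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Classic-histogram-style buckets with upper bounds 5, 10 and 20; the
	// (20, +Inf] bucket is implicit. All counts here are made up.
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema, // -53
		Count:           5,
		Sum:             42,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 4}},
		PositiveBuckets: []int64{1, 1, -1, 0}, // delta-encoded counts: 1, 2, 1, 1
		CustomValues:    []float64{5, 10, 20}, // strictly increasing and finite
	}
	// Validate runs checkHistogramCustomBounds and rejects any use of the
	// zero bucket or negative side under this schema.
	if err := h.Validate(); err != nil {
		panic(err)
	}
	for it := h.PositiveBucketIterator(); it.Next(); {
		fmt.Println(it.At()) // [-Inf,5]:1 (5,10]:2 (10,20]:1 (20,+Inf]:1
	}
}
```

The implicit +Inf bucket falls out of getBound returning math.Inf(1) for the index one past the end of CustomValues, and the first bucket's closed lower end comes from the custom-schema branch in baseBucketIterator.at.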
@@ -80,33 +87,46 @@ type Span struct { Length uint32 } +func (h *Histogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) +} + // Copy returns a deep copy of the Histogram. func (h *Histogram) Copy() *Histogram { c := Histogram{ CounterResetHint: h.CounterResetHint, Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: h.ZeroCount, Count: h.Count, Sum: h.Sum, } + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + if len(h.PositiveSpans) != 0 { c.PositiveSpans = make([]Span, len(h.PositiveSpans)) copy(c.PositiveSpans, h.PositiveSpans) } - if len(h.NegativeSpans) != 0 { - c.NegativeSpans = make([]Span, len(h.NegativeSpans)) - copy(c.NegativeSpans, h.NegativeSpans) - } if len(h.PositiveBuckets) != 0 { c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) copy(c.PositiveBuckets, h.PositiveBuckets) } - if len(h.NegativeBuckets) != 0 { - c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) - copy(c.NegativeBuckets, h.NegativeBuckets) - } return &c } @@ -116,22 +136,36 @@ func (h *Histogram) Copy() *Histogram { func (h *Histogram) CopyTo(to *Histogram) { to.CounterResetHint = h.CounterResetHint to.Schema = h.Schema - to.ZeroThreshold = h.ZeroThreshold - to.ZeroCount = h.ZeroCount to.Count = h.Count to.Sum = h.Sum + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) copy(to.PositiveSpans, h.PositiveSpans) - to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) - copy(to.NegativeSpans, h.NegativeSpans) - to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) copy(to.PositiveBuckets, h.PositiveBuckets) - - to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) - copy(to.NegativeBuckets, h.NegativeBuckets) } // String returns a string representation of the Histogram. @@ -165,8 +199,11 @@ func (h *Histogram) String() string { return sb.String() } -// ZeroBucket returns the zero bucket. +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. 
func (h *Histogram) ZeroBucket() Bucket[uint64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } return Bucket[uint64]{ Lower: -h.ZeroThreshold, Upper: h.ZeroThreshold, @@ -179,14 +216,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] { // PositiveBucketIterator returns a BucketIterator to iterate over all positive // buckets in ascending order (starting next to the zero bucket and going up). func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { - it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) return &it } // NegativeBucketIterator returns a BucketIterator to iterate over all negative // buckets in descending order (starting next to the zero bucket and going down). func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { - it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) return &it } @@ -207,30 +244,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { // but they must represent the same bucket layout to match. // Sum is compared based on its bit pattern because this method // is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. func (h *Histogram) Equals(h2 *Histogram) bool { if h2 == nil { return false } - if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || + if h.Schema != h2.Schema || h.Count != h2.Count || math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } + if h.UsesCustomBuckets() { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount { + return false + } + + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { return false } - if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { - return false - } - if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) { return false } - if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) { - return false - } return true } @@ -321,17 +370,36 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { } fh.CounterResetHint = h.CounterResetHint fh.Schema = h.Schema - fh.ZeroThreshold = h.ZeroThreshold - fh.ZeroCount = float64(h.ZeroCount) fh.Count = float64(h.Count) fh.Sum = h.Sum + if h.UsesCustomBuckets() { + fh.ZeroThreshold = 0 + fh.ZeroCount = 0 + fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans) + fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets) + + fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues)) + copy(fh.CustomValues, h.CustomValues) + } else { + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + 
currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative + } + fh.CustomValues = clearIfNotNil(fh.CustomValues) + } + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) copy(fh.PositiveSpans, h.PositiveSpans) - fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) - copy(fh.NegativeSpans, h.NegativeSpans) - fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) var currentPositive float64 for i, b := range h.PositiveBuckets { @@ -339,13 +407,6 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { fh.PositiveBuckets[i] = currentPositive } - fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) - var currentNegative float64 - for i, b := range h.NegativeBuckets { - currentNegative += float64(b) - fh.NegativeBuckets[i] = currentNegative - } - return fh } @@ -357,25 +418,47 @@ func resize[T any](items []T, n int) []T { } // Validate validates consistency between span and bucket slices. Also, buckets are checked -// against negative values. +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. // For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a // strict h.Count = nCount + pCount + h.ZeroCount check is performed. // Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), // because NaN observations do not increment the values of buckets (but they do increment // the total h.Count). func (h *Histogram) Validate() error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return fmt.Errorf("negative side: %w", err) - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return fmt.Errorf("positive side: %w", err) - } var nCount, pCount uint64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) - if err != nil { - return fmt.Errorf("negative side: %w", err) + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomValues != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) if err != nil { return fmt.Errorf("positive side: %w", err) } @@ -398,12 +481,13 @@ type regularBucketIterator struct { baseBucketIterator[uint64, int64] } -func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) 
regularBucketIterator { +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator { i := baseBucketIterator[uint64, int64]{ - schema: schema, - spans: spans, - buckets: buckets, - positive: positive, + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customValues: customValues, } return regularBucketIterator{i} } @@ -477,7 +561,7 @@ func (c *cumulativeBucketIterator) Next() bool { if c.emptyBucketCount > 0 { // We are traversing through empty buckets at the moment. - c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues) c.currIdx++ c.emptyBucketCount-- return true @@ -494,7 +578,7 @@ func (c *cumulativeBucketIterator) Next() bool { c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] c.currCumulativeCount += uint64(c.currCount) - c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues) c.posBucketsIdx++ c.idxInSpan++ @@ -524,7 +608,15 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] { // ReduceResolution reduces the histogram's spans, buckets into target schema. // The target schema must be smaller than the current histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } if targetSchema >= h.Schema { panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) } diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 14a948e644..edc8663c94 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -14,8 +14,8 @@ package histogram import ( - "fmt" "math" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -69,10 +69,25 @@ func TestHistogramString(t *testing.T) { }, expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}", }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + Count: 19, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomValues: []float64{1, 2, 5, 10, 15, 20, 25, 50}, + }, + expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}", + }, } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { actualString := c.histogram.String() require.Equal(t, c.expectedString, actualString) }) @@ -208,10 +223,30 @@ func TestCumulativeBucketIterator(t *testing.T) { {Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2}, }, }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{5, 10, 20, 50}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: 
math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1}, + + {Lower: math.Inf(-1), Upper: 20, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2}, + + {Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3}, + {Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4}, + }, + }, } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { it := c.histogram.CumulativeBucketIterator() actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets)) for it.Next() { @@ -368,10 +403,66 @@ func TestRegularBucketIterator(t *testing.T) { }, expectedNegativeBuckets: []Bucket[uint64]{}, }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{5, 10, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{0, 10, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 5}, + }, + PositiveBuckets: []int64{1, 1, 0, -1, 0}, + CustomValues: []float64{-5, 0, 20, 50}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + {Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2}, + {Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { it := c.histogram.PositiveBucketIterator() actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets)) for it.Next() { @@ -461,11 +552,81 @@ func TestHistogramToFloat(t *testing.T) { } } +func TestCustomBucketsHistogramToFloat(t *testing.T) { + h := Histogram{ + Schema: CustomBucketsSchema, + Count: 10, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 
3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomValues: []float64{5, 10, 20, 50, 100, 500}, + } + cases := []struct { + name string + fh *FloatHistogram + }{ + {name: "without prior float histogram"}, + {name: "prior float histogram with more buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }}, + {name: "prior float histogram with fewer buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2}, + }}, + } + + require.NoError(t, h.Validate()) + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + hStr := h.String() + fh := h.ToFloat(c.fh) + require.NoError(t, fh.Validate()) + require.Equal(t, hStr, h.String()) + require.Equal(t, hStr, fh.String()) + }) + } +} + // TestHistogramEquals tests both Histogram and FloatHistogram. func TestHistogramEquals(t *testing.T) { h1 := Histogram{ Schema: 3, - Count: 61, + Count: 62, Sum: 2.7, ZeroThreshold: 0.1, ZeroCount: 42, @@ -495,6 +656,15 @@ func TestHistogramEquals(t *testing.T) { require.False(t, h1f.Equals(h2f)) require.False(t, h2f.Equals(h1f)) } + notEqualsUntilFloatConv := func(h1, h2 Histogram) { + require.False(t, h1.Equals(&h2)) + require.False(t, h2.Equals(&h1)) + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) + require.True(t, h1f.Equals(h2f)) + require.True(t, h2f.Equals(h1f)) + } + + require.NoError(t, h1.Validate()) h2 := h1.Copy() equals(h1, *h2) @@ -602,6 +772,45 @@ func TestHistogramEquals(t *testing.T) { // Sum StaleNaN vs regular NaN. notEquals(*hStale, *hNaN) + + // Has non-empty custom bounds for exponential schema. + hCustom := h1.Copy() + hCustom.CustomValues = []float64{1, 2, 3} + equals(h1, *hCustom) + + cbh1 := Histogram{ + Schema: CustomBucketsSchema, + Count: 10, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 10, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + CustomValues: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000}, + } + + require.NoError(t, cbh1.Validate()) + + cbh2 := cbh1.Copy() + equals(cbh1, *cbh2) + + // Has different custom bounds for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.CustomValues = []float64{0.1, 0.2, 0.5} + notEquals(cbh1, *cbh2) + + // Has non-empty negative spans and buckets for custom buckets schema. + cbh2 = cbh1.Copy() + cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}} + cbh2.NegativeBuckets = []int64{1} + notEqualsUntilFloatConv(cbh1, *cbh2) + + // Has non-zero zero count and threshold for custom buckets schema. 
+ cbh2 = cbh1.Copy() + cbh2.ZeroThreshold = 0.1 + cbh2.ZeroCount = 10 + notEqualsUntilFloatConv(cbh1, *cbh2) } func TestHistogramCopy(t *testing.T) { @@ -640,6 +849,21 @@ func TestHistogramCopy(t *testing.T) { }, expected: &Histogram{}, }, + { + name: "with custom buckets", + orig: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + expected: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + }, } for _, tcase := range cases { @@ -690,6 +914,21 @@ func TestHistogramCopyTo(t *testing.T) { }, expected: &Histogram{}, }, + { + name: "with custom buckets", + orig: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + expected: &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + }, } for _, tcase := range cases { @@ -971,6 +1210,86 @@ func TestHistogramCompact(t *testing.T) { NegativeBuckets: []int64{2, 3}, }, }, + { + "nothing should happen with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + CustomValues: []float64{5, 10, 15}, + }, + }, + { + "eliminate zero offsets with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 5}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, + { + "eliminate zero length with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, + { + "eliminate multiple zero length spans with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {9, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, + { + "cut empty buckets at start or end of spans, even in the middle, with custom buckets", + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-4, 6}, {3, 6}}, + PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0}, + CustomValues: []float64{5, 10, 15, 20}, + }, + 0, + &Histogram{ + Schema: CustomBucketsSchema, + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + CustomValues: []float64{5, 10, 15, 20}, + }, + }, } for _, c := range cases { @@ -1107,6 +1426,145 @@ func TestHistogramValidation(t *testing.T) 
{ errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`, skipFloat: true, }, + "rejects an exponential histogram with custom buckets schema": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + }, + errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`, + }, + "rejects a custom buckets histogram with exponential schema": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `histogram with exponential schema must not have custom bounds`, + skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation + }, + "rejects a custom buckets histogram with zero/negative buckets": { + h: &Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: must have zero count of 0`, + skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation + }, + "rejects a custom buckets histogram with negative offset in first span": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: -1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a custom buckets histogram with negative offset in subsequent spans": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: -1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`, + }, + "rejects a custom buckets histogram with non-matching bucket counts": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1}, + CustomValues: []float64{1, 2, 3, 4}, + }, + errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`, + }, + "rejects a custom buckets histogram with too few bounds": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3}, + }, + errMsg: `custom 
buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`, + }, + "valid custom buckets histogram": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4}, + }, + }, + "valid custom buckets histogram with extra bounds": { + h: &Histogram{ + Count: 5, + Sum: 19.4, + Schema: CustomBucketsSchema, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8}, + }, + }, } for testName, tc := range tests { diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index f46321c97e..4bc94f84fe 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -18,6 +18,7 @@ import ( "encoding/json" "slices" "strconv" + "unsafe" "github.com/prometheus/common/model" ) @@ -215,3 +216,7 @@ func contains(s []Label, n string) bool { } return false } + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index dfc74aa3a3..0e5bb048be 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -20,7 +20,6 @@ import ( "slices" "strings" "sync" - "unsafe" "github.com/cespare/xxhash/v2" ) @@ -105,30 +104,39 @@ func (t *nameTable) ToName(num int) string { return t.byNum[num] } +// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes, +// because we expect most Prometheus instances to have more than 127 unique strings. +// And we don't encode numbers that need more than 4 bytes because we don't expect more than 536,870,912 unique strings. func decodeVarint(data string, index int) (int, int) { - // Fast-path for common case of a single byte, value 0..127. - b := data[index] + b := int(data[index]) + int(data[index+1])<<8 + index += 2 + if b < 0x8000 { + return b, index + } + return decodeVarintRest(b, data, index) +} + +func decodeVarintRest(b int, data string, index int) (int, int) { + value := int(b & 0x7FFF) + b = int(data[index]) index++ if b < 0x80 { - return int(b), index + return value | (b << 15), index } - value := int(b & 0x7F) - for shift := uint(7); ; shift += 7 { - // Just panic if we go of the end of data, since all Labels strings are constructed internally and - // malformed data indicates a bug, or memory corruption. - b := data[index] - index++ - value |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - return value, index + + value |= (b & 0x7f) << 15 + b = int(data[index]) + index++ + return value | (b << 22), index } func decodeString(t *nameTable, data string, index int) (string, int) { - var num int - num, index = decodeVarint(data, index) + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(data[index]) + int(data[index+1])<<8 + index += 2 + if num >= 0x8000 { + num, index = decodeVarintRest(num, data, index) + } return t.ToName(num), index } @@ -322,7 +330,12 @@ func (ls Labels) Get(name string) string { } else if lName[0] > name[0] { // Stop looking if we've gone past. break } - _, i = decodeVarint(ls.data, i) + // Copy decodeVarint here, because the Go compiler says it's too big to inline.
+ num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } } return "" } @@ -340,7 +353,12 @@ func (ls Labels) Has(name string) bool { } else if lName[0] > name[0] { // Stop looking if we've gone past. break } - _, i = decodeVarint(ls.data, i) + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } } return false } @@ -426,10 +444,6 @@ func EmptyLabels() Labels { return Labels{} } -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - // New returns a sorted Labels from the given labels. // The caller has to guarantee that all label names are unique. // Note this function is not efficient; should not be used in performance-critical places. @@ -646,29 +660,24 @@ func marshalNumbersToSizedBuffer(nums []int, data []byte) int { func sizeVarint(x uint64) (n int) { // Most common case first - if x < 1<<7 { - return 1 + if x < 1<<15 { + return 2 } - if x >= 1<<56 { - return 9 + if x < 1<<22 { + return 3 } - if x >= 1<<28 { - x >>= 28 - n = 4 + if x >= 1<<29 { + panic("Number too large to represent") } - if x >= 1<<14 { - x >>= 14 - n += 2 - } - if x >= 1<<7 { - n++ - } - return n + 1 + return 4 } func encodeVarintSlow(data []byte, offset int, v uint64) int { offset -= sizeVarint(v) base := offset + data[offset] = uint8(v) + v >>= 8 + offset++ for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 @@ -678,11 +687,12 @@ func encodeVarintSlow(data []byte, offset int, v uint64) int { return base } -// Special code for the common case that a value is less than 128 +// Special code for the common case that a value is less than 32768 func encodeVarint(data []byte, offset, v int) int { - if v < 1<<7 { - offset-- + if v < 1<<15 { + offset -= 2 data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) return offset } return encodeVarintSlow(data, offset, uint64(v)) diff --git a/model/labels/labels_dedupelabels_test.go b/model/labels/labels_dedupelabels_test.go new file mode 100644 index 0000000000..5ef9255c21 --- /dev/null +++ b/model/labels/labels_dedupelabels_test.go @@ -0,0 +1,50 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build dedupelabels + +package labels + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestVarint(t *testing.T) { + cases := []struct { + v int + expected []byte + }{ + {0, []byte{0, 0}}, + {1, []byte{1, 0}}, + {2, []byte{2, 0}}, + {0x7FFF, []byte{0xFF, 0x7F}}, + {0x8000, []byte{0x00, 0x80, 0x01}}, + {0x8001, []byte{0x01, 0x80, 0x01}}, + {0x3FFFFF, []byte{0xFF, 0xFF, 0x7F}}, + {0x400000, []byte{0x00, 0x80, 0x80, 0x01}}, + {0x400001, []byte{0x01, 0x80, 0x80, 0x01}}, + {0x1FFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0x7F}}, + } + var buf [16]byte + for _, c := range cases { + n := encodeVarint(buf[:], len(buf), c.v) + require.Equal(t, len(c.expected), len(buf)-n) + require.Equal(t, c.expected, buf[n:]) + got, m := decodeVarint(string(buf[:]), n) + require.Equal(t, c.v, got) + require.Equal(t, len(buf), m) + } + require.Panics(t, func() { encodeVarint(buf[:], len(buf), 1<<29) }) +} diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index 9ef764daec..bccceb61fe 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -299,11 +299,6 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } - -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - func yoloBytes(s string) (b []byte) { *(*string)(unsafe.Pointer(&b)) = s (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 3d6e7659f4..d8910cdc85 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -17,6 +17,7 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "strings" "testing" @@ -465,6 +466,38 @@ func TestLabels_DropMetricName(t *testing.T) { require.True(t, Equal(original, check)) } +func ScratchBuilderForBenchmark() ScratchBuilder { + // (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.) + b := NewScratchBuilder(256) + for i := 0; i < 256; i++ { + b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i)) + } + b.Labels() + b.Reset() + return b +} + +func NewForBenchmark(ls ...Label) Labels { + b := ScratchBuilderForBenchmark() + for _, l := range ls { + b.Add(l.Name, l.Value) + } + b.Sort() + return b.Labels() +} + +func FromStringsForBenchmark(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + b := ScratchBuilderForBenchmark() + for i := 0; i < len(ss); i += 2 { + b.Add(ss[i], ss[i+1]) + } + b.Sort() + return b.Labels() +} + // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation // The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels. // In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here) @@ -487,7 +520,7 @@ func BenchmarkLabels_Get(b *testing.B) { } for _, size := range []int{5, 10, maxLabels} { b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { - labels := New(allLabels[:size]...) + labels := NewForBenchmark(allLabels[:size]...) 
 			for _, scenario := range []struct {
 				desc, label string
 			}{
@@ -519,33 +552,33 @@ var comparisonBenchmarkScenarios = []struct {
 }{
 	{
 		"equal",
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
 	},
 	{
 		"not equal",
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
 	},
 	{
 		"different sizes",
-		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
-		FromStrings("a_label_name", "a_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
+		FromStringsForBenchmark("a_label_name", "a_label_value"),
 	},
 	{
 		"lots",
-		FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
-		FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
+		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
+		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
 	},
 	{
 		"real long equal",
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
 	},
 	{
 		"real long different end",
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
-		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
 	},
 }
@@ -732,7 +765,7 @@ func TestScratchBuilder(t *testing.T) {
 			want: FromStrings("ddd", "444"),
 		},
 	} {
-		t.Run(fmt.Sprint(i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			b := NewScratchBuilder(len(tcase.add))
 			for _, lbl := range tcase.add {
 				b.Add(lbl.Name, lbl.Value)
@@ -833,7 +866,7 @@ func BenchmarkBuilder(b *testing.B) {
 }
 
 func BenchmarkLabels_Copy(b *testing.B) {
-	l := New(benchmarkLabels...)
+	l := NewForBenchmark(benchmarkLabels...)
 
 	for i := 0; i < b.N; i++ {
 		l = l.Copy()
diff --git a/model/labels/matcher.go b/model/labels/matcher.go
index 1282f80d63..a09c838e3f 100644
--- a/model/labels/matcher.go
+++ b/model/labels/matcher.go
@@ -14,7 +14,8 @@
 package labels
 
 import (
-	"fmt"
+	"bytes"
+	"strconv"
 )
 
 // MatchType is an enum for label matching types.
@@ -78,7 +79,29 @@ func MustNewMatcher(mt MatchType, name, val string) *Matcher {
 }
 
 func (m *Matcher) String() string {
-	return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
+	// Start with a buffer pre-allocated on the stack, large enough to cover most needs.
+	var bytea [1024]byte
+	b := bytes.NewBuffer(bytea[:0])
+
+	if m.shouldQuoteName() {
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name))
+	} else {
+		b.WriteString(m.Name)
+	}
+	b.WriteString(m.Type.String())
+	b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value))
+
+	return b.String()
+}
+
+func (m *Matcher) shouldQuoteName() bool {
+	for i, c := range m.Name {
+		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') {
+			continue
+		}
+		return true
+	}
+	return len(m.Name) == 0
 }
 
 // Matches returns whether the matcher matches the given string value.
diff --git a/model/labels/matcher_test.go b/model/labels/matcher_test.go
index c23deafe61..ff39d40d0f 100644
--- a/model/labels/matcher_test.go
+++ b/model/labels/matcher_test.go
@@ -15,6 +15,7 @@ package labels
 
 import (
 	"fmt"
+	"math/rand"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -225,3 +226,128 @@ func BenchmarkNewMatcher(b *testing.B) {
 		}
 	})
 }
+
+func BenchmarkMatcher_String(b *testing.B) {
+	type benchCase struct {
+		name     string
+		matchers []*Matcher
+	}
+	cases := []benchCase{
+		{
+			name: "short name equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "foo", "bar"),
+				MustNewMatcher(MatchEqual, "bar", "baz"),
+				MustNewMatcher(MatchEqual, "abc", "def"),
+				MustNewMatcher(MatchEqual, "ghi", "klm"),
+				MustNewMatcher(MatchEqual, "nop", "qrs"),
+			},
+		},
+		{
+			name: "short quoted name not equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "f.o", "bar"),
+				MustNewMatcher(MatchEqual, "b.r", "baz"),
+				MustNewMatcher(MatchEqual, "a.c", "def"),
+				MustNewMatcher(MatchEqual, "g.i", "klm"),
+				MustNewMatcher(MatchEqual, "n.p", "qrs"),
+			},
+		},
+		{
+			name: "short quoted name with quotes not equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, `"foo"`, "bar"),
+				MustNewMatcher(MatchEqual, `"foo"`, "baz"),
+				MustNewMatcher(MatchEqual, `"foo"`, "def"),
+				MustNewMatcher(MatchEqual, `"foo"`, "klm"),
+				MustNewMatcher(MatchEqual, `"foo"`, "qrs"),
+			},
+		},
+		{
+			name: "short name value with quotes equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "foo", `"bar"`),
+				MustNewMatcher(MatchEqual, "bar", `"baz"`),
+				MustNewMatcher(MatchEqual, "abc", `"def"`),
+				MustNewMatcher(MatchEqual, "ghi", `"klm"`),
+				MustNewMatcher(MatchEqual, "nop", `"qrs"`),
+			},
+		},
+		{
+			name: "short name and long value regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "foo", "five_six_seven_eight_nine_ten_one_two_three_four"),
+				MustNewMatcher(MatchRegexp, "bar", "one_two_three_four_five_six_seven_eight_nine_ten"),
+				MustNewMatcher(MatchRegexp, "abc", "two_three_four_five_six_seven_eight_nine_ten_one"),
+				MustNewMatcher(MatchRegexp, "ghi", "three_four_five_six_seven_eight_nine_ten_one_two"),
+				MustNewMatcher(MatchRegexp, "nop", "four_five_six_seven_eight_nine_ten_one_two_three"),
+			},
+		},
+		{
+			name: "short name and long value with quotes equal",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchEqual, "foo", `five_six_seven_eight_nine_ten_"one"_two_three_four`),
+				MustNewMatcher(MatchEqual, "bar", `one_two_three_four_five_six_"seven"_eight_nine_ten`),
+				MustNewMatcher(MatchEqual, "abc", `two_three_four_five_six_seven_"eight"_nine_ten_one`),
+				MustNewMatcher(MatchEqual, "ghi", `three_four_five_six_seven_eight_"nine"_ten_one_two`),
+				MustNewMatcher(MatchEqual, "nop", `four_five_six_seven_eight_nine_"ten"_one_two_three`),
+			},
+		},
+		{
+			name: "long name regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "val"),
+				MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "val"),
+				MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "val"),
+				MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "val"),
+				MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "val"),
+			},
+		},
+		{
+			name: "long quoted name regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "val"),
+				MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "val"),
+				MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "val"),
+				MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "val"),
+				MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "val"),
+			},
+		},
+		{
+			name: "long name and long value regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "five_six_seven_eight_nine_ten_one_two_three_four"),
+				MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "one_two_three_four_five_six_seven_eight_nine_ten"),
+				MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "two_three_four_five_six_seven_eight_nine_ten_one"),
+				MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "three_four_five_six_seven_eight_nine_ten_one_two"),
+				MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "four_five_six_seven_eight_nine_ten_one_two_three"),
+			},
+		},
+		{
+			name: "long quoted name and long value regexp",
+			matchers: []*Matcher{
+				MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "five.six.seven.eight.nine.ten.one.two.three.four"),
+				MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "one.two.three.four.five.six.seven.eight.nine.ten"),
+				MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "two.three.four.five.six.seven.eight.nine.ten.one"),
+				MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "three.four.five.six.seven.eight.nine.ten.one.two"),
+				MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "four.five.six.seven.eight.nine.ten.one.two.three"),
+			},
+		},
+	}
+
+	var mixed []*Matcher
+	for _, bc := range cases {
+		mixed = append(mixed, bc.matchers...)
+	}
+	rand.Shuffle(len(mixed), func(i, j int) { mixed[i], mixed[j] = mixed[j], mixed[i] })
+	cases = append(cases, benchCase{name: "mixed", matchers: mixed})
+
+	for _, bc := range cases {
+		b.Run(bc.name, func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				m := bc.matchers[i%len(bc.matchers)]
+				_ = m.String()
+			}
+		})
+	}
+}
diff --git a/model/labels/regexp.go b/model/labels/regexp.go
index f35dc76f60..d2151d83dd 100644
--- a/model/labels/regexp.go
+++ b/model/labels/regexp.go
@@ -16,16 +16,19 @@ package labels
 import (
 	"slices"
 	"strings"
+	"unicode"
+	"unicode/utf8"
 
 	"github.com/grafana/regexp"
 	"github.com/grafana/regexp/syntax"
+	"golang.org/x/text/unicode/norm"
 )
 
 const (
 	maxSetMatches = 256
 
 	// The minimum number of alternate values a regex should have to trigger
-	// the optimization done by optimizeEqualStringMatchers() and so use a map
+	// the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map
 	// to match values instead of iterating over a list. This value has
 	// been computed running BenchmarkOptimizeEqualStringMatchers.
 	minEqualMultiStringMatcherMapThreshold = 16
@@ -41,7 +44,7 @@ type FastRegexMatcher struct {
 	stringMatcher StringMatcher
 	prefix        string
 	suffix        string
-	contains      string
+	contains      []string
 
 	// matchString is the "compiled" function to run by MatchString().
 	matchString func(string) bool
@@ -86,7 +89,7 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
 
 // compileMatchStringFunction returns the function to run by MatchString().
 func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
 	// If the only optimization available is the string matcher, then we can just run it.
-	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && m.contains == "" && m.stringMatcher != nil {
+	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && len(m.contains) == 0 && m.stringMatcher != nil {
 		return m.stringMatcher.Matches
 	}
@@ -105,7 +108,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
 		if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
 			return false
 		}
-		if m.contains != "" && !strings.Contains(s, m.contains) {
+		if len(m.contains) > 0 && !containsInOrder(s, m.contains) {
 			return false
 		}
 		if m.stringMatcher != nil {
@@ -118,7 +121,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
 // IsOptimized returns true if any fast-path optimization is applied to the
 // regex matcher.
 func (m *FastRegexMatcher) IsOptimized() bool {
-	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != ""
+	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || len(m.contains) > 0
 }
 
 // findSetMatches extract equality matches from a regexp.
@@ -334,7 +337,7 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
 		return nil, nil
 	}
 
-	multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates)
+	multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0)
 
 	for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
 		// Split the string into the next literal and the remainder
@@ -360,8 +363,9 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
 
 // optimizeConcatRegex returns literal prefix/suffix text that can be safely
 // checked against the label value before running the regexp matcher.
-func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
+func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) {
 	sub := r.Sub
+	clearCapture(sub...)
 
 	// We can safely remove begin and end text matchers respectively
 	// at the beginning and end of the regexp.
@@ -386,13 +390,11 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
 		suffix = string(sub[last].Rune)
 	}
 
-	// If contains any literal which is not a prefix/suffix, we keep the
-	// 1st one. We do not keep the whole list of literals to simplify the
-	// fast path.
+	// If the regexp contains any literal which is not a prefix/suffix, we keep
+	// track of all the ones which are case-sensitive.
 	for i := 1; i < len(sub)-1; i++ {
 		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
-			contains = string(sub[i].Rune)
-			break
+			contains = append(contains, string(sub[i].Rune))
 		}
 	}
 
@@ -410,7 +412,7 @@ func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
 	clearBeginEndText(re)
 
 	m := stringMatcherFromRegexpInternal(re)
-	m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
+	m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
 
 	return m
 }
@@ -547,11 +549,7 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
 
 	// Right matcher with 1 fixed set match.
 	case left == nil && len(matches) == 1:
-		return &literalPrefixStringMatcher{
-			prefix:              matches[0],
-			prefixCaseSensitive: matchesCaseSensitive,
-			right:               right,
-		}
+		return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right)
 
 	// Left matcher with 1 fixed set match.
 	case right == nil && len(matches) == 1:
@@ -629,21 +627,47 @@ func (m *containsStringMatcher) Matches(s string) bool {
 	return false
 }
 
-// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher.
-type literalPrefixStringMatcher struct {
-	prefix              string
-	prefixCaseSensitive bool
+func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher {
+	if prefixCaseSensitive {
+		return &literalPrefixSensitiveStringMatcher{
+			prefix: prefix,
+			right:  right,
+		}
+	}
+
+	return &literalPrefixInsensitiveStringMatcher{
+		prefix: prefix,
+		right:  right,
+	}
+}
+
+// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher.
+type literalPrefixSensitiveStringMatcher struct {
+	prefix string
 
 	// The matcher that must match the right side. Can be nil.
 	right StringMatcher
 }
 
-func (m *literalPrefixStringMatcher) Matches(s string) bool {
-	// Ensure the prefix matches.
-	if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) {
+func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool {
+	if !strings.HasPrefix(s, m.prefix) {
 		return false
 	}
-	if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) {
+
+	// Ensure the right side matches.
+	return m.right.Matches(s[len(m.prefix):])
+}
+
+// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher.
+type literalPrefixInsensitiveStringMatcher struct {
+	prefix string
+
+	// The matcher that must match the right side. Can be nil.
+	right StringMatcher
+}
+
+func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool {
+	if !hasPrefixCaseInsensitive(s, m.prefix) {
 		return false
 	}
@@ -708,17 +732,20 @@ func (m *equalStringMatcher) Matches(s string) bool {
 type multiStringMatcherBuilder interface {
 	StringMatcher
 	add(s string)
+	addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher)
 	setMatches() []string
 }
 
-func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder {
+func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder {
 	// If the estimated size is low enough, it's faster to use a slice instead of a map.
-	if estimatedSize < minEqualMultiStringMatcherMapThreshold {
+	if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 {
 		return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
 	}
 
 	return &equalMultiStringMapMatcher{
 		values:        make(map[string]struct{}, estimatedSize),
+		prefixes:      make(map[string][]StringMatcher, estimatedPrefixes),
+		minPrefixLen:  minPrefixLength,
 		caseSensitive: caseSensitive,
 	}
 }
@@ -734,6 +761,10 @@ func (m *equalMultiStringSliceMatcher) add(s string) {
 	m.values = append(m.values, s)
 }
 
+func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
+	panic("not implemented")
+}
+
 func (m *equalMultiStringSliceMatcher) setMatches() []string {
 	return m.values
 }
@@ -755,25 +786,49 @@ func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
 	return false
 }
 
-// equalMultiStringMapMatcher matches a string exactly against a map of valid values.
+// equalMultiStringMapMatcher matches a string exactly against a map of valid values
+// or against a set of prefix matchers.
 type equalMultiStringMapMatcher struct {
 	// values contains values to match a string against. If the matching is case insensitive,
 	// the values here must be lowercase.
 	values map[string]struct{}
-
+	// prefixes maps strings, all of length minPrefixLen, to sets of matchers to check against the full string.
+	// If the matching is case insensitive, prefixes are all lowercase.
+	prefixes map[string][]StringMatcher
+	// minPrefixLen can be zero, meaning there are no prefix matchers.
+	minPrefixLen  int
 	caseSensitive bool
 }
 
 func (m *equalMultiStringMapMatcher) add(s string) {
 	if !m.caseSensitive {
-		s = strings.ToLower(s)
+		s = toNormalisedLower(s)
 	}
 
 	m.values[s] = struct{}{}
}
 
+func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) {
+	if m.minPrefixLen == 0 {
+		panic("addPrefix called when no prefix length defined")
+	}
+	if len(prefix) < m.minPrefixLen {
+		panic("addPrefix called with a too short prefix")
+	}
+	if m.caseSensitive != prefixCaseSensitive {
+		panic("addPrefix called with a prefix whose case sensitivity is different from the expected one")
+	}
+
+	s := prefix[:m.minPrefixLen]
+	if !m.caseSensitive {
+		s = strings.ToLower(s)
+	}
+
+	m.prefixes[s] = append(m.prefixes[s], matcher)
+}
+
 func (m *equalMultiStringMapMatcher) setMatches() []string {
-	if len(m.values) >= maxSetMatches {
+	if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 {
 		return nil
 	}
 
@@ -786,11 +841,42 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool {
 	if !m.caseSensitive {
-		s = strings.ToLower(s)
+		s = toNormalisedLower(s)
 	}
 
-	_, ok := m.values[s]
-	return ok
+	if _, ok := m.values[s]; ok {
+		return true
+	}
+	if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
+		for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
+			if matcher.Matches(s) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// toNormalisedLower normalises the input string using Unicode Normalization Form KD and then converts
+// it to lower case.
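+// Pure-ASCII strings are lower-cased byte by byte and skip the normalisation pass entirely.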
+func toNormalisedLower(s string) string {
+	var buf []byte
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= utf8.RuneSelf {
+			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
+		}
+		if 'A' <= c && c <= 'Z' {
+			if buf == nil {
+				buf = []byte(s)
+			}
+			buf[i] = c + 'a' - 'A'
+		}
+	}
+	if buf == nil {
+		return s
+	}
+	return yoloString(buf)
 }
 
 // anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
@@ -827,8 +913,12 @@ type zeroOrOneCharacterStringMatcher struct {
 }
 
 func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
-	// Zero or one.
-	if len(s) > 1 {
+	// If there's more than one rune in the string, then it can't match.
+	if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError {
+		// Size is 0 for empty strings, 1 for invalid rune.
+		// Empty string matches, invalid rune matches if there isn't anything else.
+		return size == len(s)
+	} else if size < len(s) {
 		return false
 	}
@@ -847,20 +937,24 @@ func (m trueMatcher) Matches(_ string) bool {
 	return true
 }
 
-// optimizeEqualStringMatchers optimize a specific case where all matchers are made by an
-// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In
-// this specific case, when we have many strings to match against we can use a map instead
+// optimizeEqualOrPrefixStringMatchers optimizes a specific case where all matchers are made of an
+// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or
+// with a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher).
+//
+// In this specific case, when we have many strings to match against we can use a map instead
 // of iterating over the list of strings.
-func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher {
+func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher {
 	var (
 		caseSensitive    bool
 		caseSensitiveSet bool
 		numValues        int
+		numPrefixes      int
+		minPrefixLength  int
 	)
 
 	// Analyse the input StringMatcher to count the number of occurrences
 	// and ensure all of them have the same case sensitivity.
-	analyseCallback := func(matcher *equalStringMatcher) bool {
+	analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool {
 		// Ensure we don't have mixed case sensitivity.
 		if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
 			return false
@@ -873,34 +967,55 @@ func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatch
 		return true
 	}
 
-	if !findEqualStringMatchers(input, analyseCallback) {
+	analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+		// Ensure we don't have mixed case sensitivity.
+		if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
+			return false
+		} else if !caseSensitiveSet {
+			caseSensitive = prefixCaseSensitive
+			caseSensitiveSet = true
+		}
+		if numPrefixes == 0 || len(prefix) < minPrefixLength {
+			minPrefixLength = len(prefix)
+		}
+
+		numPrefixes++
+		return true
+	}
+
+	if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) {
 		return input
 	}
 
-	// If the number of values found is less than the threshold, then we should skip the optimization.
-	if numValues < threshold {
+	// If the number of values and prefixes found is less than the threshold, then we should skip the optimization.
+	if (numValues + numPrefixes) < threshold {
 		return input
 	}
 
 	// Parse again the input StringMatcher to extract all values and storing them.
 	// We can skip the case sensitivity check because we've already checked it and
 	// if the code reach this point then it means all matchers have the same case sensitivity.
-	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues)
+	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
 
 	// Ignore the return value because we already iterated over the input StringMatcher
 	// and it was all good.
-	findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
+	findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
 		multiMatcher.add(matcher.s)
 		return true
+	}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+		multiMatcher.addPrefix(prefix, caseSensitive, matcher)
+		return true
 	})
 
 	return multiMatcher
 }
 
-// findEqualStringMatchers analyze the input StringMatcher and calls the callback for each
-// equalStringMatcher found. Returns true if and only if the input StringMatcher is *only*
-// composed by an alternation of equalStringMatcher.
-func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool {
+// findEqualOrPrefixStringMatchers analyzes the input StringMatcher and calls the equalMatcherCallback for each
+// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
+//
+// Returns true if and only if the input StringMatcher is *only* composed of an alternation of equalStringMatcher and/or
+// literal prefix matchers. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
+func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
 	orInput, ok := input.(orStringMatcher)
 	if !ok {
 		return false
@@ -909,17 +1024,27 @@ func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalSt
 	for _, m := range orInput {
 		switch casted := m.(type) {
 		case orStringMatcher:
-			if !findEqualStringMatchers(m, callback) {
+			if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
 				return false
 			}
 
 		case *equalStringMatcher:
-			if !callback(casted) {
+			if !equalMatcherCallback(casted) {
+				return false
+			}
+
+		case *literalPrefixSensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
+				return false
+			}
+
+		case *literalPrefixInsensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
 				return false
 			}
 
 		default:
-			// It's not an equal string matcher, so we have to stop searching
+			// It's not an equal or prefix string matcher, so we have to stop searching
 			// cause this optimization can't be applied.
 			return false
 		}
@@ -935,3 +1060,27 @@ func hasPrefixCaseInsensitive(s, prefix string) bool {
 func hasSuffixCaseInsensitive(s, suffix string) bool {
 	return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
 }
+
+func containsInOrder(s string, contains []string) bool {
+	// Optimization for the case we only have to look for 1 substring.
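+	// Substrings must appear in order without overlapping: e.g. with
+	// contains=["foo", "bar"], "_foo_bar_" matches while "_bar_foo_" does not.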
+	if len(contains) == 1 {
+		return strings.Contains(s, contains[0])
+	}
+
+	return containsInOrderMulti(s, contains)
+}
+
+func containsInOrderMulti(s string, contains []string) bool {
+	offset := 0
+
+	for _, substr := range contains {
+		at := strings.Index(s[offset:], substr)
+		if at == -1 {
+			return false
+		}
+
+		offset += at + len(substr)
+	}
+
+	return true
+}
diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go
index 3a15b52b40..24875e64ef 100644
--- a/model/labels/regexp_test.go
+++ b/model/labels/regexp_test.go
@@ -19,6 +19,7 @@ import (
 	"strings"
 	"testing"
 	"time"
+	"unicode/utf8"
 
 	"github.com/grafana/regexp"
 	"github.com/grafana/regexp/syntax"
@@ -36,6 +37,7 @@ var (
 	".*foo",
 	"^.*foo$",
 	"^.+foo$",
+	".?",
 	".*",
 	".+",
 	"foo.+",
@@ -68,6 +70,9 @@ var (
"jyyfj00j0061|jyyfj00j0062|jyyfj94j0093|jyyfj99j0093|jyyfm01j0021|jyyfm02j0021|jyefj00j0192|jyefj00j0193|jyefj00j0194|jyefj00j0195|jyefj00j0196|jyefj00j0197|jyefj00j0290|jyefj00j0291|jyefj00j0292|jyefj00j0293|jyefj00j0294|jyefj00j0295|jyefj00j0296|jyefj00j0297|jyefj89j0394|jyefj90j0394|jyefj91j0394|jyefj95j0347|jyefj96j0322|jyefj96j0347|jyefj97j0322|jyefj97j0347|jyefj98j0322|jyefj98j0347|jyefj99j0320|jyefj99j0322|jyefj99j0323|jyefj99j0335|jyefj99j0336|jyefj99j0344|jyefj99j0347|jyefj99j0349|jyefj99j0351|jyeff00j0117|lyyfm01j0025|lyyfm01j0028|lyyfm01j0041|lyyfm01j0133|lyyfm01j0701|lyyfm02j0025|lyyfm02j0028|lyyfm02j0041|lyyfm02j0133|lyyfm02j0701|lyyfm03j0701|lyefj00j0775|lyefj00j0776|lyefj00j0777|lyefj00j0778|lyefj00j0779|lyefj00j0780|lyefj00j0781|lyefj00j0782|lyefj50j3807|lyefj50j3852|lyefj51j3807|lyefj51j3852|lyefj52j3807|lyefj52j3852|lyefj53j3807|lyefj53j3852|lyefj54j3807|lyefj54j3852|lyefj54j3886|lyefj55j3807|lyefj55j3852|lyefj55j3886|lyefj56j3807|lyefj56j3852|lyefj56j3886|lyefj57j3807|lyefj57j3852|lyefj57j3886|lyefj58j3807|lyefj58j3852|lyefj58j3886|lyefj59j3807|lyefj59j3852|lyefj59j3886|lyefj60j3807|lyefj60j3852|lyefj60j3886|lyefj61j3807|lyefj61j3852|lyefj61j3886|lyefj62j3807|lyefj62j3852|lyefj62j3886|lyefj63j3807|lyefj63j3852|lyefj63j3886|lyefj64j3807|lyefj64j3852|lyefj64j3886|lyefj65j3807|lyefj65j3852|lyefj65j3886|lyefj66j3807|lyefj66j3852|lyefj66j3886|lyefj67j3807|lyefj67j3852|lyefj67j3886|lyefj68j3807|lyefj68j3852|lyefj68j3886|lyefj69j3807|lyefj69j3846|lyefj69j3852|lyefj69j3886|lyefj70j3807|lyefj70j3846|lyefj70j3852|lyefj70j3886|lyefj71j3807|lyefj71j3846|lyefj71j3852|lyefj71j3886|lyefj72j3807|lyefj72j3846|lyefj72j3852|lyefj72j3886|lyefj73j3807|lyefj73j3846|lyefj73j3852|lyefj73j3886|lyefj74j3807|lyefj74j3846|lyefj74j3852|lyefj74j3886|lyefj75j3807|lyefj75j3808|lyefj75j3846|lyefj75j3852|lyefj75j3886|lyefj76j3732|lyefj76j3807|lyefj76j3808|lyefj76j3846|lyefj76j3852|lyefj76j3886|lyefj77j3732|lyefj77j3807|lyefj77j3808|lyefj77j3846|lyefj77j3852|lyefj77j3886|lyefj78j3278|lyefj78j3732|lyefj78j3807|lyefj78j3808|lyefj78j3846|lyefj78j3852|lyefj78j3886|lyefj79j3732|lyefj79j3807|lyefj79j3808|lyefj79j3846|lyefj79j3852|lyefj79j3886|lyefj80j3732|lyefj80j3807|lyefj80j3808|lyefj80j3846|lyefj80j3852|lyefj80j3886|lyefj81j3732|lyefj81j3807|lyefj81j3808|lyefj81j3846|lyefj81j3852|lyefj81j3886|lyefj82j3732|lyefj82j3807|lyefj82j3808|lyefj82j3846|lyefj82j3852|lyefj82j3886|lyefj83j3732|lyefj83j3807|lyefj83j3808|lyefj83j3846|lyefj83j3852|lyefj83j3886|lyefj84j3732|lyefj84j3807|lyefj84j3808|lyefj84j3846|lyefj84j3852|lyefj84j3886|lyefj85j3732|lyefj85j3807|lyefj85j3808|lyefj85j3846|lyefj85j3852|lyefj85j3886|lyefj86j3278|lyefj86j3732|lyefj86j3807|lyefj86j3808|lyefj86j3846|lyefj86j3852|lyefj86j3886|lyefj87j3278|lyefj87j3732|lyefj87j3807|lyefj87j3808|
lyefj87j3846|lyefj87j3852|lyefj87j3886|lyefj88j3732|lyefj88j3807|lyefj88j3808|lyefj88j3846|lyefj88j3852|lyefj88j3886|lyefj89j3732|lyefj89j3807|lyefj89j3808|lyefj89j3846|lyefj89j3852|lyefj89j3886|lyefj90j3732|lyefj90j3807|lyefj90j3808|lyefj90j3846|lyefj90j3852|lyefj90j3886|lyefj91j3732|lyefj91j3807|lyefj91j3808|lyefj91j3846|lyefj91j3852|lyefj91j3886|lyefj92j3732|lyefj92j3807|lyefj92j3808|lyefj92j3846|lyefj92j3852|lyefj92j3886|lyefj93j3732|lyefj93j3807|lyefj93j3808|lyefj93j3846|lyefj93j3852|lyefj93j3885|lyefj93j3886|lyefj94j3525|lyefj94j3732|lyefj94j3807|lyefj94j3808|lyefj94j3846|lyefj94j3852|lyefj94j3885|lyefj94j3886|lyefj95j3525|lyefj95j3732|lyefj95j3807|lyefj95j3808|lyefj95j3846|lyefj95j3852|lyefj95j3886|lyefj96j3732|lyefj96j3803|lyefj96j3807|lyefj96j3808|lyefj96j3846|lyefj96j3852|lyefj96j3886|lyefj97j3333|lyefj97j3732|lyefj97j3792|lyefj97j3803|lyefj97j3807|lyefj97j3808|lyefj97j3838|lyefj97j3843|lyefj97j3846|lyefj97j3852|lyefj97j3886|lyefj98j3083|lyefj98j3333|lyefj98j3732|lyefj98j3807|lyefj98j3808|lyefj98j3838|lyefj98j3843|lyefj98j3846|lyefj98j3852|lyefj98j3873|lyefj98j3877|lyefj98j3882|lyefj98j3886|lyefj99j2984|lyefj99j3083|lyefj99j3333|lyefj99j3732|lyefj99j3807|lyefj99j3808|lyefj99j3846|lyefj99j3849|lyefj99j3852|lyefj99j3873|lyefj99j3877|lyefj99j3882|lyefj99j3884|lyefj99j3886|lyeff00j0106|lyeff00j0107|lyeff00j0108|lyeff00j0129|lyeff00j0130|lyeff00j0131|lyeff00j0132|lyeff00j0133|lyeff00j0134|lyeff00j0444|lyeff00j0445|lyeff91j0473|lyeff92j0473|lyeff92j3877|lyeff93j3877|lyeff94j0501|lyeff94j3525|lyeff94j3877|lyeff95j0501|lyeff95j3525|lyeff95j3877|lyeff96j0503|lyeff96j3877|lyeff97j3877|lyeff98j3333|lyeff98j3877|lyeff99j2984|lyeff99j3333|lyeff99j3877|mfyr9149ej|mfyr9149ek|mfyr9156ej|mfyr9156ek|mfyr9157ej|mfyr9157ek|mfyr9159ej|mfyr9159ek|mfyr9203ej|mfyr9204ej|mfyr9205ej|mfyr9206ej|mfyr9207ej|mfyr9207ek|mfyr9217ej|mfyr9217ek|mfyr9222ej|mfyr9222ek|mfyu0185ej|mfye9187ej|mfye9187ek|mfye9188ej|mfye9188ek|mfye9189ej|mfye9189ek|mfyf0185ej|oyefj87j0007|oyefj88j0007|oyefj89j0007|oyefj90j0007|oyefj91j0007|oyefj95j0001|oyefj96j0001|oyefj98j0004|oyefj99j0004|oyeff91j0004|oyeff92j0004|oyeff93j0004|oyeff94j0004|oyeff95j0004|oyeff96j0004|rklvyaxmany|ryefj93j0001|ryefj94j0001|tyyfj00a0001|tyyfj84j0005|tyyfj85j0005|tyyfj86j0005|tyyfj87j0005|tyyfj88j0005|tyyfj89j0005|tyyfj90j0005|tyyfj91j0005|tyyfj92j0005|tyyfj93j0005|tyyfj94j0005|tyyfj95j0005|tyyfj96j0005|tyyfj97j0005|tyyfj98j0005|tyyfj99j0005|tyefj50j0015|tyefj50j0017|tyefj50j0019|tyefj50j0020|tyefj50j0021|tyefj51j0015|tyefj51j0017|tyefj51j0019|tyefj51j0020|tyefj51j0021|tyefj52j0015|tyefj52j0017|tyefj52j0019|tyefj52j0020|tyefj52j0021|tyefj53j0015|tyefj53j0017|tyefj53j0019|tyefj53j0020|tyefj53j0021|tyefj54j0015|tyefj54j0017|tyefj54j0019|tyefj54j0020|tyefj54j0021|tyefj55j0015|tyefj55j0017|tyefj55j0019|tyefj55j0020|tyefj55j0021|tyefj56j0015|tyefj56j0017|tyefj56j0019|tyefj56j0020|tyefj56j0021|tyefj57j0015|tyefj57j0017|tyefj57j0019|tyefj57j0020|tyefj57j0021|tyefj58j0015|tyefj58j0017|tyefj58j0019|tyefj58j0020|tyefj58j0021|tyefj59j0015|tyefj59j0017|tyefj59j0019|tyefj59j0020|tyefj59j0021|tyefj60j0015|tyefj60j0017|tyefj60j0019|tyefj60j0020|tyefj60j0021|tyefj61j0015|tyefj61j0017|tyefj61j0019|tyefj61j0020|tyefj61j0021|tyefj62j0015|tyefj62j0017|tyefj62j0019|tyefj62j0020|tyefj62j0021|tyefj63j0015|tyefj63j0017|tyefj63j0019|tyefj63j0020|tyefj63j0021|tyefj64j0015|tyefj64j0017|tyefj64j0019|tyefj64j0020|tyefj64j0021|tyefj65j0015|tyefj65j0017|tyefj65j0019|tyefj65j0020|tyefj65j0021|tyefj66j0015|tyefj66j0017|tyefj66j0019|tyefj66j0020|tyefj66j0021|tyefj67j0015|tyefj67j0017|tyefj67
j0019|tyefj67j0020|tyefj67j0021|tyefj68j0015|tyefj68j0017|tyefj68j0019|tyefj68j0020|tyefj68j0021|tyefj69j0015|tyefj69j0017|tyefj69j0019|tyefj69j0020|tyefj69j0021|tyefj70j0015|tyefj70j0017|tyefj70j0019|tyefj70j0020|tyefj70j0021|tyefj71j0015|tyefj71j0017|tyefj71j0019|tyefj71j0020|tyefj71j0021|tyefj72j0015|tyefj72j0017|tyefj72j0019|tyefj72j0020|tyefj72j0021|tyefj72j0022|tyefj73j0015|tyefj73j0017|tyefj73j0019|tyefj73j0020|tyefj73j0021|tyefj73j0022|tyefj74j0015|tyefj74j0017|tyefj74j0019|tyefj74j0020|tyefj74j0021|tyefj74j0022|tyefj75j0015|tyefj75j0017|tyefj75j0019|tyefj75j0020|tyefj75j0021|tyefj75j0022|tyefj76j0015|tyefj76j0017|tyefj76j0019|tyefj76j0020|tyefj76j0021|tyefj76j0022|tyefj76j0119|tyefj77j0015|tyefj77j0017|tyefj77j0019|tyefj77j0020|tyefj77j0021|tyefj77j0022|tyefj77j0119|tyefj78j0015|tyefj78j0017|tyefj78j0019|tyefj78j0020|tyefj78j0021|tyefj78j0022|tyefj78j0119|tyefj79j0015|tyefj79j0017|tyefj79j0019|tyefj79j0020|tyefj79j0021|tyefj79j0022|tyefj79j0119|tyefj80j0015|tyefj80j0017|tyefj80j0019|tyefj80j0020|tyefj80j0021|tyefj80j0022|tyefj80j0114|tyefj80j0119|tyefj81j0015|tyefj81j0017|tyefj81j0019|tyefj81j0020|tyefj81j0021|tyefj81j0022|tyefj81j0114|tyefj81j0119|tyefj82j0015|tyefj82j0017|tyefj82j0019|tyefj82j0020|tyefj82j0021|tyefj82j0022|tyefj82j0119|tyefj83j0015|tyefj83j0017|tyefj83j0019|tyefj83j0020|tyefj83j0021|tyefj83j0022|tyefj83j0119|tyefj84j0014|tyefj84j0015|tyefj84j0017|tyefj84j0019|tyefj84j0020|tyefj84j0021|tyefj84j0022|tyefj84j0119|tyefj85j0014|tyefj85j0015|tyefj85j0017|tyefj85j0019|tyefj85j0020|tyefj85j0021|tyefj85j0022|tyefj85j0119|tyefj86j0014|tyefj86j0015|tyefj86j0017|tyefj86j0019|tyefj86j0020|tyefj86j0021|tyefj86j0022|tyefj87j0014|tyefj87j0015|tyefj87j0017|tyefj87j0019|tyefj87j0020|tyefj87j0021|tyefj87j0022|tyefj88j0014|tyefj88j0015|tyefj88j0017|tyefj88j0019|tyefj88j0020|tyefj88j0021|tyefj88j0022|tyefj88j0100|tyefj88j0115|tyefj89j0003|tyefj89j0014|tyefj89j0015|tyefj89j0017|tyefj89j0019|tyefj89j0020|tyefj89j0021|tyefj89j0022|tyefj89j0100|tyefj89j0115|tyefj90j0014|tyefj90j0015|tyefj90j0016|tyefj90j0017|tyefj90j0018|tyefj90j0019|tyefj90j0020|tyefj90j0021|tyefj90j0022|tyefj90j0100|tyefj90j0111|tyefj90j0115|tyefj91j0014|tyefj91j0015|tyefj91j0016|tyefj91j0017|tyefj91j0018|tyefj91j0019|tyefj91j0020|tyefj91j0021|tyefj91j0022|tyefj91j0100|tyefj91j0111|tyefj91j0115|tyefj92j0014|tyefj92j0015|tyefj92j0016|tyefj92j0017|tyefj92j0018|tyefj92j0019|tyefj92j0020|tyefj92j0021|tyefj92j0022|tyefj92j0100|tyefj92j0105|tyefj92j0115|tyefj92j0121|tyefj93j0004|tyefj93j0014|tyefj93j0015|tyefj93j0017|tyefj93j0018|tyefj93j0019|tyefj93j0020|tyefj93j0021|tyefj93j0022|tyefj93j0100|tyefj93j0105|tyefj93j0115|tyefj93j0121|tyefj94j0002|tyefj94j0004|tyefj94j0008|tyefj94j0014|tyefj94j0015|tyefj94j0017|tyefj94j0019|tyefj94j0020|tyefj94j0021|tyefj94j0022|tyefj94j0084|tyefj94j0088|tyefj94j0100|tyefj94j0106|tyefj94j0116|tyefj94j0121|tyefj94j0123|tyefj95j0002|tyefj95j0004|tyefj95j0008|tyefj95j0014|tyefj95j0015|tyefj95j0017|tyefj95j0019|tyefj95j0020|tyefj95j0021|tyefj95j0022|tyefj95j0084|tyefj95j0088|tyefj95j0100|tyefj95j0101|tyefj95j0106|tyefj95j0112|tyefj95j0116|tyefj95j0121|tyefj95j0123|tyefj96j0014|tyefj96j0015|tyefj96j0017|tyefj96j0019|tyefj96j0020|tyefj96j0021|tyefj96j0022|tyefj96j0082|tyefj96j0084|tyefj96j0100|tyefj96j0101|tyefj96j0112|tyefj96j0117|tyefj96j0121|tyefj96j0124|tyefj97j0014|tyefj97j0015|tyefj97j0017|tyefj97j0019|tyefj97j0020|tyefj97j0021|tyefj97j0022|tyefj97j0081|tyefj97j0087|tyefj97j0098|tyefj97j0100|tyefj97j0107|tyefj97j0109|tyefj97j0113|tyefj97j0117|tyefj97j0118|tyefj97j0121|tyefj98j0003|tyefj98j0006|
tyefj98j0014|tyefj98j0015|tyefj98j0017|tyefj98j0019|tyefj98j0020|tyefj98j0021|tyefj98j0022|tyefj98j0083|tyefj98j0085|tyefj98j0086|tyefj98j0100|tyefj98j0104|tyefj98j0118|tyefj98j0121|tyefj99j0003|tyefj99j0006|tyefj99j0007|tyefj99j0014|tyefj99j0015|tyefj99j0017|tyefj99j0019|tyefj99j0020|tyefj99j0021|tyefj99j0022|tyefj99j0023|tyefj99j0100|tyefj99j0108|tyefj99j0110|tyefj99j0121|tyefj99j0125|tyeff94j0002|tyeff94j0008|tyeff94j0010|tyeff94j0011|tyeff94j0035|tyeff95j0002|tyeff95j0006|tyeff95j0008|tyeff95j0010|tyeff95j0011|tyeff95j0035|tyeff96j0003|tyeff96j0006|tyeff96j0009|tyeff96j0010|tyeff97j0004|tyeff97j0009|tyeff97j0116|tyeff98j0007|tyeff99j0007|tyeff99j0125|uyyfj00j0484|uyyfj00j0485|uyyfj00j0486|uyyfj00j0487|uyyfj00j0488|uyyfj00j0489|uyyfj00j0490|uyyfj00j0491|uyyfj00j0492|uyyfj00j0493|uyyfj00j0494|uyyfj00j0495|uyyfj00j0496|uyyfj00j0497|uyyfj00j0498|uyyfj00j0499|uyyfj00j0500|uyyfj00j0501|uyyfj00j0502|uyyfj00j0503|uyyfj00j0504|uyyfj00j0505|uyyfj00j0506|uyyfj00j0507|uyyfj00j0508|uyyfj00j0509|uyyfj00j0510|uyyfj00j0511|uyyfj00j0512|uyyfj00j0513|uyyfj00j0514|uyyfj00j0515|uyyfj00j0516|uyyfj00j0517|uyyfj00j0518|uyyfj00j0519|uyyfj00j0520|uyyfj00j0521|uyyfj00j0522|uyyfj00j0523|uyyfj00j0524|uyyfj00j0525|uyyfj00j0526|uyyfj00j0527|uyyfj00j0528|uyyfj00j0529|uyyfj00j0530|uyyfj00j0531|uyyfj00j0532|uyyfj00j0533|uyyfj00j0534|uyyfj00j0535|uyyfj00j0536|uyyfj00j0537|uyyfj00j0538|uyyfj00j0539|uyyfj00j0540|uyyfj00j0541|uyyfj00j0542|uyyfj00j0543|uyyfj00j0544|uyyfj00j0545|uyyfj00j0546|uyyfj00j0547|uyyfj00j0548|uyyfj00j0549|uyyfj00j0550|uyyfj00j0551|uyyfj00j0553|uyyfj00j0554|uyyfj00j0555|uyyfj00j0556|uyyfj00j0557|uyyfj00j0558|uyyfj00j0559|uyyfj00j0560|uyyfj00j0561|uyyfj00j0562|uyyfj00j0563|uyyfj00j0564|uyyfj00j0565|uyyfj00j0566|uyyfj00j0614|uyyfj00j0615|uyyfj00j0616|uyyfj00j0617|uyyfj00j0618|uyyfj00j0619|uyyfj00j0620|uyyfj00j0621|uyyfj00j0622|uyyfj00j0623|uyyfj00j0624|uyyfj00j0625|uyyfj00j0626|uyyfj00j0627|uyyfj00j0628|uyyfj00j0629|uyyfj00j0630|uyyfj00j0631|uyyfj00j0632|uyyfj00j0633|uyyfj00j0634|uyyfj00j0635|uyyfj00j0636|uyyfj00j0637|uyyfj00j0638|uyyfj00j0639|uyyfj00j0640|uyyfj00j0641|uyyfj00j0642|uyyfj00j0643|uyyfj00j0644|uyyfj00j0645|uyyfj00j0646|uyyfj00j0647|uyyfj00j0648|uyyfj00j0649|uyyfj00j0650|uyyfj00j0651|uyyfj00j0652|uyyfj00j0653|uyyfj00j0654|uyyfj00j0655|uyyfj00j0656|uyyfj00j0657|uyyfj00j0658|uyyfj00j0659|uyyfj00j0660|uyyfj00j0661|uyyfj00j0662|uyyfj00j0663|uyyfj00j0664|uyyfj00j0665|uyyfj00j0666|uyyfj00j0667|uyyfj00j0668|uyyfj00j0669|uyyfj00j0670|uyyfj00j0671|uyyfj00j0672|uyyfj00j0673|uyyfj00j0674|uyyfj00j0675|uyyfj00j0676|uyyfj00j0677|uyyfj00j0678|uyyfj00j0679|uyyfj00j0680|uyyfj00j0681|uyyfj00j0682|uyyfj00j0683|uyyfj00j0684|uyyfj00j0685|uyyfj00j0686|uyyfj00j0687|uyyfj00j0688|uyyfj00j0689|uyyfj00j0690|uyyfj00j0691|uyyfj00j0692|uyyfj00j0693|uyyfj00j0694|uyyfj00j0695|uyyfj00j0696|uyyfj00j0697|uyyfj00j0698|uyyfj00j0699|uyyfj00j0700|uyyfj00j0701|uyyfj00j0702|uyyfj00j0703|uyyfj00j0704|uyyfj00j0705|uyyfj00j0706|uyyfj00j0707|uyyfj00j0708|uyyfj00j0709|uyyfj00j0710|uyyfj00j0711|uyyfj00j0712|uyyfj00j0713|uyyfj00j0714|uyyfj00j0715|uyyfj00j0716|uyyfj00j0717|uyyfj00j0718|uyyfj00j0719|uyyfj00j0720|uyyfj00j0721|uyyfj00j0722|uyyfj00j0723|uyyfj00j0724|uyyfj00j0725|uyyfj00j0726|uyyfj00j0727|uyyfj00j0728|uyyfj00j0729|uyyfj00j0730|uyyfj00j0731|uyyfj00j0732|uyyfj00j0733|uyyfj00j0734|uyyfj00j0735|uyyfj00j0736|uyyfj00j0737|uyyfj00j0738|uyyfj00j0739|uyyfj00j0740|uyyfj00j0741|uyyfj00j0742|uyyfj00j0743|uyyfj00j0744|uyyfj00j0745|uyyfj00j0746|uyyfj00j0747|uyyfj00j0748|uyyfj00j0749|uyyfj00j0750|uyyfj00j0751|uyyfj00j0752|uyyfj00j0753|uyyfj0
0j0754|uyyfj00j0755|uyyfj00j0756|uyyfj00j0757|uyyfj00j0758|uyyfj00j0759|uyyfj00j0760|uyyfj00j0761|uyyfj00j0762|uyyfj00j0763|uyyfj00j0764|uyyfj00j0765|uyyfj00j0766|uyyfj00j0767|uyyfj00j0768|uyyfj00j0769|uyyfj00j0770|uyyfj00j0771|uyyfj00j0772|uyyfj00j0773|uyyfj00j0774|uyyfj00j0775|uyyfj00j0776|uyyfj00j0777|uyyfj00j0778|uyyfj00j0779|uyyfj00j0780|uyyfj00j0781|uyyfj00j0782|uyyff00j0011|uyyff00j0031|uyyff00j0032|uyyff00j0033|uyyff00j0034|uyyff99j0012|uyefj00j0071|uyefj00j0455|uyefj00j0456|uyefj00j0582|uyefj00j0583|uyefj00j0584|uyefj00j0585|uyefj00j0586|uyefj00j0590|uyeff00j0188|xyrly-f-jyy-y01|xyrly-f-jyy-y02|xyrly-f-jyy-y03|xyrly-f-jyy-y04|xyrly-f-jyy-y05|xyrly-f-jyy-y06|xyrly-f-jyy-y07|xyrly-f-jyy-y08|xyrly-f-jyy-y09|xyrly-f-jyy-y10|xyrly-f-jyy-y11|xyrly-f-jyy-y12|xyrly-f-jyy-y13|xyrly-f-jyy-y14|xyrly-f-jyy-y15|xyrly-f-jyy-y16|xyrly-f-url-y01|xyrly-f-url-y02|yyefj97j0005|ybyfcy4000|ybyfcy4001|ayefj99j0035|by-b-y-bzu-l01|by-b-y-bzu-l02|by-b-e-079|by-b-e-080|by-b-e-082|by-b-e-083|byefj72j0002|byefj73j0002|byefj74j0002|byefj75j0002|byefj76j0002|byefj77j0002|byefj78j0002|byefj79j0002|byefj91j0007|byefj92j0007|byefj98j0003|byefj99j0003|byefj99j0005|byefj99j0006|byeff88j0002|byeff89j0002|byeff90j0002|byeff91j0002|byeff92j0002|byeff93j0002|byeff96j0003|byeff97j0003|byeff98j0003|byeff99j0003|fymfj98j0001|fymfj99j0001|fyyaj98k0297|fyyaj99k0297|fyyfj00j0109|fyyfj00j0110|fyyfj00j0122|fyyfj00j0123|fyyfj00j0201|fyyfj00j0202|fyyfj00j0207|fyyfj00j0208|fyyfj00j0227|fyyfj00j0228|fyyfj00j0229|fyyfj00j0230|fyyfj00j0231|fyyfj00j0232|fyyfj00j0233|fyyfj00j0234|fyyfj00j0235|fyyfj00j0236|fyyfj00j0237|fyyfj00j0238|fyyfj00j0239|fyyfj00j0240|fyyfj00j0241|fyyfj00j0242|fyyfj00j0243|fyyfj00j0244|fyyfj00j0245|fyyfj00j0246|fyyfj00j0247|fyyfj00j0248|fyyfj00j0249|fyyfj00j0250|fyyfj00j0251|fyyfj00j0252|fyyfj00j0253|fyyfj00j0254|fyyfj00j0255|fyyfj00j0256|fyyfj00j0257|fyyfj00j0258|fyyfj00j0259|fyyfj00j0260|fyyfj00j0261|fyyfj00j0262|fyyfj00j0263|fyyfj00j0264|fyyfj00j0265|fyyfj00j0266|fyyfj00j0267|fyyfj00j0268|fyyfj00j0290|fyyfj00j0291|fyyfj00j0292|fyyfj00j0293|fyyfj00j0294|fyyfj00j0295|fyyfj00j0296|fyyfj00j0297|fyyfj00j0298|fyyfj00j0299|fyyfj00j0300|fyyfj00j0301|fyyfj00j0302|fyyfj00j0303|fyyfj00j0304|fyyfj00j0305|fyyfj00j0306|fyyfj00j0307|fyyfj00j0308|fyyfj00j0309|fyyfj00j0310|fyyfj00j0311|fyyfj00j0312|fyyfj00j0313|fyyfj00j0314|fyyfj00j0315|fyyfj00j0316|fyyfj00j0317|fyyfj00j0318|fyyfj00j0319|fyyfj00j0320|fyyfj00j0321|fyyfj00j0322|fyyfj00j0323|fyyfj00j0324|fyyfj00j0325|fyyfj00j0326|fyyfj00j0327|fyyfj00j0328|fyyfj00j0329|fyyfj00j0330|fyyfj00j0331|fyyfj00j0332|fyyfj00j0333|fyyfj00j0334|fyyfj00j0335|fyyfj00j0340|fyyfj00j0341|fyyfj00j0342|fyyfj00j0343|fyyfj00j0344|fyyfj00j0345|fyyfj00j0346|fyyfj00j0347|fyyfj00j0348|fyyfj00j0349|fyyfj00j0367|fyyfj00j0368|fyyfj00j0369|fyyfj00j0370|fyyfj00j0371|fyyfj00j0372|fyyfj00j0373|fyyfj00j0374|fyyfj00j0375|fyyfj00j0376|fyyfj00j0377|fyyfj00j0378|fyyfj00j0379|fyyfj00j0380|fyyfj00j0381|fyyfj00j0382|fyyfj00j0383|fyyfj00j0384|fyyfj00j0385|fyyfj00j0386|fyyfj00j0387|fyyfj00j0388|fyyfj00j0415|fyyfj00j0416|fyyfj00j0417|fyyfj00j0418|fyyfj00j0419|fyyfj00j0420|fyyfj00j0421|fyyfj00j0422|fyyfj00j0423|fyyfj00j0424|fyyfj00j0425|fyyfj00j0426|fyyfj00j0427|fyyfj00j0428|fyyfj00j0429|fyyfj00j0430|fyyfj00j0431|fyyfj00j0432|fyyfj00j0433|fyyfj00j0434|fyyfj00j0435|fyyfj00j0436|fyyfj00j0437|fyyfj00j0438|fyyfj00j0439|fyyfj00j0440|fyyfj00j0441|fyyfj00j0446|fyyfj00j0447|fyyfj00j0448|fyyfj00j0449|fyyfj00j0451|fyyfj00j0452|fyyfj00j0453|fyyfj00j0454|fyyfj00j0455|fyyfj00j0456|fyyfj00j0457|fyyfj00j0459|fyyfj00j0460|fyyfj00j0461|fyyfj
00j0462|fyyfj00j0463|fyyfj00j0464|fyyfj00j0465|fyyfj00j0466|fyyfj00j0467|fyyfj00j0468|fyyfj00j0469|fyyfj00j0470|fyyfj00j0471|fyyfj00j0474|fyyfj00j0475|fyyfj00j0476|fyyfj00j0477|fyyfj00j0478|fyyfj00j0479|fyyfj00j0480|fyyfj00j0481|fyyfj00j0482|fyyfj00j0483|fyyfj00j0484|fyyfj00j0485|fyyfj00j0486|fyyfj00j0487|fyyfj00j0488|fyyfj00j0489|fyyfj00j0490|fyyfj00j0491|fyyfj00j0492|fyyfj00j0493|fyyfj00j0494|fyyfj00j0495|fyyfj00j0496|fyyfj00j0497|fyyfj00j0498|fyyfj00j0499|fyyfj00j0500|fyyfj00j0501|fyyfj00j0502|fyyfj00j0503|fyyfj00j0504|fyyfj00j0505|fyyfj00j0506|fyyfj00j0507|fyyfj00j0508|fyyfj00j0509|fyyfj00j0510|fyyfj00j0511|fyyfj00j0512|fyyfj00j0513|fyyfj00j0514|fyyfj00j0515|fyyfj00j0516|fyyfj00j0517|fyyfj00j0518|fyyfj00j0521|fyyfj00j0522|fyyfj00j0523|fyyfj00j0524|fyyfj00j0526|fyyfj00j0527|fyyfj00j0528|fyyfj00j0529|fyyfj00j0530|fyyfj00j0531|fyyfj00j0532|fyyfj00j0533|fyyfj00j0534|fyyfj00j0535|fyyfj00j0536|fyyfj00j0537|fyyfj00j0538|fyyfj00j0539|fyyfj00j0540|fyyfj00j0541|fyyfj00j0542|fyyfj00j0543|fyyfj00j0544|fyyfj00j0545|fyyfj00j0546|fyyfj00j0564|fyyfj00j0565|fyyfj00j0566|fyyfj00j0567|fyyfj00j0568|fyyfj00j0569|fyyfj00j0570|fyyfj00j0571|fyyfj00j0572|fyyfj00j0574|fyyfj00j0575|fyyfj00j0576|fyyfj00j0577|fyyfj00j0578|fyyfj00j0579|fyyfj00j0580|fyyfj01j0473|fyyfj02j0473|fyyfj36j0289|fyyfj37j0209|fyyfj37j0289|fyyfj38j0209|fyyfj38j0289|fyyfj39j0209|fyyfj39j0289|fyyfj40j0209|fyyfj40j0289|fyyfj41j0209|fyyfj41j0289|fyyfj42j0209|fyyfj42j0289|fyyfj43j0209|fyyfj43j0289|fyyfj44j0209|fyyfj44j0289|fyyfj45j0104|fyyfj45j0209|fyyfj45j0289|fyyfj46j0104|fyyfj46j0209|fyyfj46j0289|fyyfj47j0104|fyyfj47j0209|fyyfj47j0289|fyyfj48j0104|fyyfj48j0209|fyyfj48j0289|fyyfj49j0104|fyyfj49j0209|fyyfj49j0289|fyyfj50j0104|fyyfj50j0209|fyyfj50j0289|fyyfj50j0500|fyyfj51j0104|fyyfj51j0209|fyyfj51j0289|fyyfj51j0500|fyyfj52j0104|fyyfj52j0209|fyyfj52j0289|fyyfj52j0500|fyyfj53j0104|fyyfj53j0209|fyyfj53j0289|fyyfj53j0500|fyyfj54j0104|fyyfj54j0209|fyyfj54j0289|fyyfj54j0500|fyyfj55j0104|fyyfj55j0209|fyyfj55j0289|fyyfj55j0500|fyyfj56j0104|fyyfj56j0209|fyyfj56j0289|fyyfj56j0500|fyyfj57j0104|fyyfj57j0209|fyyfj57j0289|fyyfj57j0500|fyyfj58j0104|fyyfj58j0209|fyyfj58j0289|fyyfj58j0500|fyyfj59j0104|fyyfj59j0209|fyyfj59j0289|fyyfj59j0500|fyyfj60j0104|fyyfj60j0209|fyyfj60j0289|fyyfj60j0500|fyyfj61j0104|fyyfj61j0209|fyyfj61j0289|fyyfj61j0500|fyyfj62j0104|fyyfj62j0209|fyyfj62j0289|fyyfj62j0500|fyyfj63j0104|fyyfj63j0209|fyyfj63j0289|fyyfj63j0500|fyyfj64j0104|fyyfj64j0107|fyyfj64j0209|fyyfj64j0289|fyyfj64j0500|fyyfj64j0573|fyyfj65j0104|fyyfj65j0107|fyyfj65j0209|fyyfj65j0289|fyyfj65j0500|fyyfj65j0573|fyyfj66j0104|fyyfj66j0107|fyyfj66j0209|fyyfj66j0289|fyyfj66j0500|fyyfj66j0573|fyyfj67j0104|fyyfj67j0107|fyyfj67j0209|fyyfj67j0289|fyyfj67j0500|fyyfj67j0573|fyyfj68j0104|fyyfj68j0107|fyyfj68j0209|fyyfj68j0289|fyyfj68j0500|fyyfj68j0573|fyyfj69j0104|fyyfj69j0107|fyyfj69j0209|fyyfj69j0289|fyyfj69j0500|fyyfj69j0573|fyyfj70j0104|fyyfj70j0107|fyyfj70j0209|fyyfj70j0289|fyyfj70j0472|fyyfj70j0500|fyyfj70j0573|fyyfj71j0104|fyyfj71j0107|fyyfj71j0209|fyyfj71j0289|fyyfj71j0472|fyyfj71j0500|fyyfj71j0573|fyyfj72j0104|fyyfj72j0107|fyyfj72j0209|fyyfj72j0289|fyyfj72j0472|fyyfj72j0500|fyyfj72j0573|fyyfj73j0104|fyyfj73j0107|fyyfj73j0209|fyyfj73j0289|fyyfj73j0472|fyyfj73j0500|fyyfj73j0573|fyyfj74j0104|fyyfj74j0107|fyyfj74j0209|fyyfj74j0289|fyyfj74j0472|fyyfj74j0500|fyyfj74j0573|fyyfj75j0104|fyyfj75j0107|fyyfj75j0108|fyyfj75j0209|fyyfj75j0289|fyyfj75j0472|fyyfj75j0500|fyyfj75j0573|fyyfj76j0104|fyyfj76j0107|fyyfj76j0108|fyyfj76j0209|fyyfj76j0289|fyyfj76j0472|fyyfj76j0500|fyyfj76j0573|fyyfj77j010
4|fyyfj77j0107|fyyfj77j0108|fyyfj77j0209|fyyfj77j0289|fyyfj77j0472|fyyfj77j0500|fyyfj77j0573|fyyfj78j0104|fyyfj78j0107|fyyfj78j0108|fyyfj78j0209|fyyfj78j0289|fyyfj78j0472|fyyfj78j0500|fyyfj78j0573|fyyfj79j0104|fyyfj79j0107|fyyfj79j0108|fyyfj79j0209|fyyfj79j0289|fyyfj79j0339|fyyfj79j0472|fyyfj79j0500|fyyfj79j0573|fyyfj80j0104|fyyfj80j0107|fyyfj80j0108|fyyfj80j0209|fyyfj80j0289|fyyfj80j0339|fyyfj80j0352|fyyfj80j0472|fyyfj80j0500|fyyfj80j0573|fyyfj81j0104|fyyfj81j0107|fyyfj81j0108|fyyfj81j0209|fyyfj81j0289|fyyfj81j0339|fyyfj81j0352|fyyfj81j0472|fyyfj81j0500|fyyfj81j0573|fyyfj82j0104|fyyfj82j0107|fyyfj82j0108|fyyfj82j0209|fyyfj82j0289|fyyfj82j0339|fyyfj82j0352|fyyfj82j0472|fyyfj82j0500|fyyfj82j0573|fyyfj83j0104|fyyfj83j0107|fyyfj83j0108|fyyfj83j0209|fyyfj83j0289|fyyfj83j0339|fyyfj83j0352|fyyfj83j0472|fyyfj83j0500|fyyfj83j0573|fyyfj84j0104|fyyfj84j0107|fyyfj84j0108|fyyfj84j0209|fyyfj84j0289|fyyfj84j0339|fyyfj84j0352|fyyfj84j0472|fyyfj84j0500|fyyfj84j0573|fyyfj85j0104|fyyfj85j0107|fyyfj85j0108|fyyfj85j0209|fyyfj85j0289|fyyfj85j0301|fyyfj85j0339|fyyfj85j0352|fyyfj85j0472|fyyfj85j0500|fyyfj85j0573|fyyfj86j0104|fyyfj86j0107|fyyfj86j0108|fyyfj86j0209|fyyfj86j0289|fyyfj86j0301|fyyfj86j0339|fyyfj86j0352|fyyfj86j0472|fyyfj86j0500|fyyfj86j0573|fyyfj87j0067|fyyfj87j0104|fyyfj87j0107|fyyfj87j0108|fyyfj87j0209|fyyfj87j0289|fyyfj87j0301|fyyfj87j0339|fyyfj87j0352|fyyfj87j0472|fyyfj87j0500|fyyfj87j0573|fyyfj88j0067|fyyfj88j0104|fyyfj88j0107|fyyfj88j0108|fyyfj88j0209|fyyfj88j0289|fyyfj88j0301|fyyfj88j0339|fyyfj88j0352|fyyfj88j0472|fyyfj88j0500|fyyfj88j0573|fyyfj89j0067|fyyfj89j0104|fyyfj89j0107|fyyfj89j0108|fyyfj89j0209|fyyfj89j0289|fyyfj89j0301|fyyfj89j0339|fyyfj89j0352|fyyfj89j0358|fyyfj89j0472|fyyfj89j0500|fyyfj89j0573|fyyfj90j0067|fyyfj90j0104|fyyfj90j0107|fyyfj90j0108|fyyfj90j0209|fyyfj90j0289|fyyfj90j0301|fyyfj90j0321|fyyfj90j0339|fyyfj90j0352|fyyfj90j0358|fyyfj90j0452|fyyfj90j0472|fyyfj90j0500|fyyfj90j0573|fyyfj91j0067|fyyfj91j0104|fyyfj91j0107|fyyfj91j0108|fyyfj91j0209|fyyfj91j0289|fyyfj91j0301|fyyfj91j0321|fyyfj91j0339|fyyfj91j0352|fyyfj91j0358|fyyfj91j0452|fyyfj91j0472|fyyfj91j0500|fyyfj91j0573|fyyfj92j0067|fyyfj92j0104|fyyfj92j0107|fyyfj92j0108|fyyfj92j0209|fyyfj92j0289|fyyfj92j0301|fyyfj92j0321|fyyfj92j0339|fyyfj92j0352|fyyfj92j0358|fyyfj92j0452|fyyfj92j0472|fyyfj92j0500|fyyfj92j0573|fyyfj93j0067|fyyfj93j0099|fyyfj93j0104|fyyfj93j0107|fyyfj93j0108|fyyfj93j0209|fyyfj93j0289|fyyfj93j0301|fyyfj93j0321|fyyfj93j0352|fyyfj93j0358|fyyfj93j0452|fyyfj93j0472|fyyfj93j0500|fyyfj93j0573|fyyfj94j0067|fyyfj94j0099|fyyfj94j0104|fyyfj94j0107|fyyfj94j0108|fyyfj94j0209|fyyfj94j0211|fyyfj94j0289|fyyfj94j0301|fyyfj94j0321|fyyfj94j0352|fyyfj94j0358|fyyfj94j0359|fyyfj94j0452|fyyfj94j0472|fyyfj94j0500|fyyfj94j0573|fyyfj95j0067|fyyfj95j0099|fyyfj95j0104|fyyfj95j0107|fyyfj95j0108|fyyfj95j0209|fyyfj95j0211|fyyfj95j0289|fyyfj95j0298|fyyfj95j0301|fyyfj95j0321|fyyfj95j0339|fyyfj95j0352|fyyfj95j0358|fyyfj95j0359|fyyfj95j0414|fyyfj95j0452|fyyfj95j0472|fyyfj95j0500|fyyfj95j0573|fyyfj96j0067|fyyfj96j0099|fyyfj96j0104|fyyfj96j0107|fyyfj96j0108|fyyfj96j0209|fyyfj96j0211|fyyfj96j0289|fyyfj96j0298|fyyfj96j0301|fyyfj96j0321|fyyfj96j0339|fyyfj96j0352|fyyfj96j0358|fyyfj96j0359|fyyfj96j0414|fyyfj96j0452|fyyfj96j0472|fyyfj96j0500|fyyfj96j0573|fyyfj97j0067|fyyfj97j0099|fyyfj97j0100|fyyfj97j0104|fyyfj97j0107|fyyfj97j0108|fyyfj97j0209|fyyfj97j0211|fyyfj97j0289|fyyfj97j0298|fyyfj97j0301|fyyfj97j0321|fyyfj97j0339|fyyfj97j0352|fyyfj97j0358|fyyfj97j0359|fyyfj97j0414|fyyfj97j0445|fyyfj97j0452|fyyfj97j0472|fyyfj97j0500|fyyfj97j0573|fyyfj98j0067|fyyf
j98j0099|fyyfj98j0100|fyyfj98j0104|fyyfj98j0107|fyyfj98j0108|fyyfj98j0178|fyyfj98j0209|fyyfj98j0211|fyyfj98j0289|fyyfj98j0298|fyyfj98j0301|fyyfj98j0303|fyyfj98j0321|fyyfj98j0339|fyyfj98j0352|fyyfj98j0358|fyyfj98j0359|fyyfj98j0413|fyyfj98j0414|fyyfj98j0445|fyyfj98j0452|fyyfj98j0472|fyyfj98j0500|fyyfj98j0573|fyyfj99j0067|fyyfj99j0099|fyyfj99j0100|fyyfj99j0104|fyyfj99j0107|fyyfj99j0108|fyyfj99j0131|fyyfj99j0209|fyyfj99j0211|fyyfj99j0285|fyyfj99j0289|fyyfj99j0298|fyyfj99j0301|fyyfj99j0303|fyyfj99j0321|fyyfj99j0339|fyyfj99j0352|fyyfj99j0358|fyyfj99j0359|fyyfj99j0413|fyyfj99j0414|fyyfj99j0445|fyyfj99j0452|fyyfj99j0472|fyyfj99j0500|fyyfj99j0573|fyyfm01j0064|fyyfm01j0070|fyyfm01j0071|fyyfm01j0088|fyyfm01j0091|fyyfm01j0108|fyyfm01j0111|fyyfm01j0112|fyyfm01j0114|fyyfm01j0115|fyyfm01j0133|fyyfm01j0140|fyyfm01j0141|fyyfm01j0142|fyyfm01j0143|fyyfm01j0148|fyyfm01j0149|fyyfm01j0152|fyyfm01j0153|fyyfm01j0155|fyyfm01j0159|fyyfm01j0160|fyyfm01j0163|fyyfm01j0165|fyyfm01j0168|fyyfm01j0169|fyyfm01j0221|fyyfm01j0223|fyyfm01j0268|fyyfm01j0271|fyyfm01j0285|fyyfm01j0299|fyyfm01j0320|fyyfm01j0321|fyyfm01j0360|fyyfm01j0369|fyyfm01j0400|fyyfm01j0401|fyyfm01j0411|fyyfm01j0572|fyyfm01j0765|fyyfm02j0064|fyyfm02j0069|fyyfm02j0070|fyyfm02j0071|fyyfm02j0088|fyyfm02j0091|fyyfm02j0108|fyyfm02j0111|fyyfm02j0112|fyyfm02j0114|fyyfm02j0115|fyyfm02j0133|fyyfm02j0140|fyyfm02j0141|fyyfm02j0142|fyyfm02j0143|fyyfm02j0148|fyyfm02j0149|fyyfm02j0152|fyyfm02j0153|fyyfm02j0155|fyyfm02j0159|fyyfm02j0160|fyyfm02j0163|fyyfm02j0165|fyyfm02j0168|fyyfm02j0169|fyyfm02j0221|fyyfm02j0223|fyyfm02j0268|fyyfm02j0271|fyyfm02j0285|fyyfm02j0299|fyyfm02j0320|fyyfm02j0321|fyyfm02j0360|fyyfm02j0369|fyyfm02j0400|fyyfm02j0572|fyyfm02j0765|fyyfm03j0064|fyyfm03j0070|fyyfm03j0091|fyyfm03j0108|fyyfm03j0111|fyyfm03j0115|fyyfm03j0160|fyyfm03j0165|fyyfm03j0299|fyyfm03j0400|fyyfm03j0572|fyyfm04j0111|fyyfm51j0064|fyyfm51j0369|fyyfm52j0064|fyyfm52j0369|fyyfr88j0003|fyyfr89j0003|fyyff98j0071|fyyff98j0303|fyyff99j0029|fyyff99j0303|fyefj00j0112|fyefj00j0545|fyefj00j0546|fyefj00j0633|fyefj00j0634|fyefj00j0635|fyefj00j0636|fyefj00j0637|fyefj00j0649|fyefj00j0651|fyefj00j0652|fyefj00j0656|fyefj00j0657|fyefj00j0658|fyefj00j0659|fyefj00j0660|fyefj00j0685|fyefj00j0686|fyefj00j0688|fyefj00j0701|fyefj00j0702|fyefj00j0703|fyefj00j0715|fyefj00j0720|fyefj00j0721|fyefj00j0722|fyefj00j0724|fyefj00j0725|fyefj00j0726|fyefj00j0731|fyefj00j0751|fyefj00j0752|fyefj00j0756|fyefj00j0757|fyefj00j0758|fyefj00j0759|fyefj00j0761|fyefj00j0762|fyefj00j0763|fyefj00j0764|fyefj00j0768|fyefj00j0769|fyefj00j0785|fyefj00j0786|fyefj00j0789|fyefj00j0790|fyefj00j0793|fyefj00j0794|fyefj00j0803|fyefj00j0811|fyefj00j0821|fyefj00j0822|fyefj00j0823|fyefj00j0824|fyefj00j0825|fyefj00j0826|fyefj00j0827|fyefj00j0828|fyefj00j0829|fyefj00j0831|fyefj00j0832|fyefj00j0833|fyefj00j0838|fyefj00j0839|fyefj00j0840|fyefj00j0854|fyefj00j0855|fyefj00j0856|fyefj00j0859|fyefj00j0860|fyefj00j0861|fyefj00j0869|fyefj00j0870|fyefj00j0879|fyefj00j0887|fyefj00j0888|fyefj00j0889|fyefj00j0900|fyefj00j0901|fyefj00j0903|fyefj00j0904|fyefj00j0905|fyefj00j0959|fyefj00j0960|fyefj00j0961|fyefj00j1004|fyefj00j1005|fyefj00j1012|fyefj00j1013|fyefj00j1014|fyefj00j1015|fyefj00j1016|fyefj00j1017|fyefj00j1018|fyefj00j1019|fyefj00j1020|fyefj00j1021|fyefj00j1218|fyefj00j1219|fyefj00j1220|fyefj00j1221|fyefj00j1222|fyefj00j1811|fyefj00j1854|fyefj00j1855|fyefj00j1856|fyefj01j0707|fyefj02j0707|fyefj03j0707|fyefj66j0001|fyefj67j0001|fyefj68j0001|fyefj68j1064|fyefj69j0001|fyefj69j1064|fyefj70j0001|fyefj70j0859|fyefj70j1064|fyefj71j0001|fyefj71j1064|fyefj72j00
01|fyefj72j1064|fyefj73j0001|fyefj73j1064|fyefj74j0001|fyefj74j1064|fyefj75j0001|fyefj75j1064|fyefj75j1092|fyefj76j0001|fyefj76j1064|fyefj76j1092|fyefj77j0001|fyefj77j1064|fyefj77j1092|fyefj78j0001|fyefj78j1064|fyefj78j1092|fyefj79j0001|fyefj79j1064|fyefj79j1092|fyefj80j0001|fyefj80j0859|fyefj80j1064|fyefj80j1077|fyefj80j1092|fyefj81j0001|fyefj81j1064|fyefj81j1077|fyefj81j1092|fyefj82j0001|fyefj82j1064|fyefj82j1092|fyefj83j0001|fyefj83j1064|fyefj83j1092|fyefj84j0001|fyefj84j1064|fyefj84j1092|fyefj85j0001|fyefj85j0356|fyefj85j1064|fyefj85j1092|fyefj86j0001|fyefj86j0356|fyefj86j1064|fyefj87j0001|fyefj87j0356|fyefj87j1064|fyefj88j0001|fyefj88j0356|fyefj88j1064|fyefj89j0001|fyefj89j0356|fyefj89j1064|fyefj89j1067|fyefj90j0001|fyefj90j0758|fyefj90j1021|fyefj90j1064|fyefj90j1067|fyefj91j0001|fyefj91j0758|fyefj91j0791|fyefj91j1021|fyefj91j1064|fyefj91j1067|fyefj91j1077|fyefj92j0001|fyefj92j0359|fyefj92j0678|fyefj92j0758|fyefj92j0791|fyefj92j0867|fyefj92j1021|fyefj92j1064|fyefj92j1077|fyefj93j0001|fyefj93j0359|fyefj93j0678|fyefj93j0758|fyefj93j0791|fyefj93j0867|fyefj93j1010|fyefj93j1021|fyefj93j1049|fyefj93j1064|fyefj93j1077|fyefj94j0001|fyefj94j0678|fyefj94j0758|fyefj94j0791|fyefj94j0867|fyefj94j1010|fyefj94j1021|fyefj94j1049|fyefj94j1064|fyefj94j1070|fyefj94j1077|fyefj94j1085|fyefj95j0001|fyefj95j0678|fyefj95j0758|fyefj95j0791|fyefj95j0867|fyefj95j0965|fyefj95j0966|fyefj95j1010|fyefj95j1011|fyefj95j1021|fyefj95j1055|fyefj95j1064|fyefj95j1069|fyefj95j1077|fyefj95j1085|fyefj95j1089|fyefj96j0001|fyefj96j0106|fyefj96j0671|fyefj96j0678|fyefj96j0758|fyefj96j0791|fyefj96j0814|fyefj96j0836|fyefj96j0867|fyefj96j0931|fyefj96j0965|fyefj96j0966|fyefj96j0976|fyefj96j1010|fyefj96j1021|fyefj96j1051|fyefj96j1055|fyefj96j1064|fyefj96j1068|fyefj96j1070|fyefj96j1077|fyefj96j1079|fyefj96j1081|fyefj96j1086|fyefj96j1088|fyefj96j1091|fyefj96j1093|fyefj96j1094|fyefj97j0001|fyefj97j0106|fyefj97j0584|fyefj97j0586|fyefj97j0671|fyefj97j0678|fyefj97j0758|fyefj97j0791|fyefj97j0814|fyefj97j0825|fyefj97j0836|fyefj97j0863|fyefj97j0865|fyefj97j0867|fyefj97j0914|fyefj97j0931|fyefj97j0952|fyefj97j0965|fyefj97j0966|fyefj97j0969|fyefj97j0971|fyefj97j0972|fyefj97j0976|fyefj97j0985|fyefj97j1010|fyefj97j1021|fyefj97j1051|fyefj97j1052|fyefj97j1055|fyefj97j1058|fyefj97j1059|fyefj97j1064|fyefj97j1068|fyefj97j1077|fyefj97j1079|fyefj97j1081|fyefj97j1086|fyefj97j1088|fyefj97j1095|fyefj98j0001|fyefj98j0243|fyefj98j0326|fyefj98j0329|fyefj98j0343|fyefj98j0344|fyefj98j0380|fyefj98j0472|fyefj98j0584|fyefj98j0586|fyefj98j0604|fyefj98j0671|fyefj98j0673|fyefj98j0676|fyefj98j0677|fyefj98j0678|fyefj98j0694|fyefj98j0758|fyefj98j0814|fyefj98j0825|fyefj98j0836|fyefj98j0863|fyefj98j0865|fyefj98j0867|fyefj98j0896|fyefj98j0898|fyefj98j0901|fyefj98j0906|fyefj98j0910|fyefj98j0913|fyefj98j0914|fyefj98j0922|fyefj98j0931|fyefj98j0934|fyefj98j0936|fyefj98j0951|fyefj98j0952|fyefj98j0963|fyefj98j0965|fyefj98j0966|fyefj98j0969|fyefj98j0971|fyefj98j0972|fyefj98j0974|fyefj98j0975|fyefj98j0976|fyefj98j0977|fyefj98j0978|fyefj98j0985|fyefj98j0992|fyefj98j1008|fyefj98j1009|fyefj98j1010|fyefj98j1011|fyefj98j1012|fyefj98j1019|fyefj98j1021|fyefj98j1028|fyefj98j1034|fyefj98j1039|fyefj98j1046|fyefj98j1047|fyefj98j1048|fyefj98j1054|fyefj98j1055|fyefj98j1064|fyefj98j1068|fyefj98j1077|fyefj98j1079|fyefj98j1080|fyefj98j1081|fyefj98j1082|fyefj98j1084|fyefj98j1087|fyefj98j1088|fyefj98j1090|fyefj99j0010|fyefj99j0188|fyefj99j0243|fyefj99j0268|fyefj99j0280|fyefj99j0301|fyefj99j0329|fyefj99j0343|fyefj99j0344|fyefj99j0380|fyefj99j0552|fyefj99j0573|fyefj99j0584|fyefj99j0586|fyefj99j0604|fye
fj99j0671|fyefj99j0673|fyefj99j0676|fyefj99j0677|fyefj99j0678|fyefj99j0694|fyefj99j0722|fyefj99j0757|fyefj99j0758|fyefj99j0771|fyefj99j0772|fyefj99j0804|fyefj99j0806|fyefj99j0809|fyefj99j0814|fyefj99j0825|fyefj99j0836|fyefj99j0862|fyefj99j0863|fyefj99j0865|fyefj99j0866|fyefj99j0867|fyefj99j0875|fyefj99j0896|fyefj99j0898|fyefj99j0901|fyefj99j0906|fyefj99j0907|fyefj99j0908|fyefj99j0910|fyefj99j0912|fyefj99j0913|fyefj99j0914|fyefj99j0921|fyefj99j0922|fyefj99j0923|fyefj99j0931|fyefj99j0934|fyefj99j0936|fyefj99j0937|fyefj99j0949|fyefj99j0951|fyefj99j0952|fyefj99j0962|fyefj99j0963|fyefj99j0965|fyefj99j0966|fyefj99j0969|fyefj99j0971|fyefj99j0972|fyefj99j0974|fyefj99j0975|fyefj99j0976|fyefj99j0977|fyefj99j0978|fyefj99j0982|fyefj99j0985|fyefj99j0986|fyefj99j0988|fyefj99j0991|fyefj99j0992|fyefj99j0995|fyefj99j0997|fyefj99j0999|fyefj99j1003|fyefj99j1006|fyefj99j1008|fyefj99j1009|fyefj99j1010|fyefj99j1011|fyefj99j1016|fyefj99j1019|fyefj99j1020|fyefj99j1021|fyefj99j1024|fyefj99j1026|fyefj99j1028|fyefj99j1031|fyefj99j1033|fyefj99j1034|fyefj99j1036|fyefj99j1039|fyefj99j1042|fyefj99j1045|fyefj99j1046|fyefj99j1048|fyefj99j1053|fyefj99j1054|fyefj99j1055|fyefj99j1061|fyefj99j1062|fyefj99j1063|fyefj99j1064|fyefj99j1068|fyefj99j1072|fyefj99j1076|fyefj99j1077|fyefj99j1079|fyefj99j1080|fyefj99j1081|fyefj99j1083|fyefj99j1084|fyefj99j1087|fyefj99j1088|fyefm00j0113|fyefm01j0057|fyefm01j0088|fyefm01j0091|fyefm01j0101|fyefm01j0104|fyefm01j0107|fyefm01j0112|fyefm01j0379|fyefm02j0057|fyefm02j0101|fyefm02j0104|fyefm02j0107|fyefm02j0112|fyefm02j0379|fyefm98j0066|fyefm99j0066|fyefm99j0090|fyefm99j0093|fyefm99j0110|fyefm99j0165|fyefm99j0208|fyefm99j0209|fyefm99j0295|fyefm99j0401|fyefm99j0402|fyefm99j0907|fyefm99j1054|fyefn98j0015|fyefn98j0024|fyefn98j0030|fyefn99j0015|fyefn99j0024|fyefn99j0030|fyefr94j0559|fyefr95j0559|fyefr96j0559|fyefr97j0559|fyefr98j0559|fyefr99j0012|fyefr99j0559|fyefb01305|fyeff00j0170|fyeff00j0224|fyeff00j0227|fyeff00j0228|fyeff00j0229|fyeff00j0280|fyeff00j0281|fyeff00j0282|fyeff00j0283|fyeff00j0288|fyeff00j0289|fyeff00j0331|fyeff00j0332|fyeff00j0333|fyeff00j0334|fyeff00j0335|fyeff00j0336|fyeff00j0337|fyeff00j0338|fyeff00j0346|fyeff00j0347|fyeff00j0348|fyeff00j0349|fyeff00j0350|fyeff00j0351|fyeff00j0357|fyeff00j0358|fyeff00j0371|fyeff00j0372|fyeff00j0396|fyeff00j0397|fyeff00j0424|fyeff00j0425|fyeff01j0416|fyeff02j0416|fyeff78j0418|fyeff79j0418|fyeff79j1051|fyeff80j1051|fyeff81j1051|fyeff82j1051|fyeff83j1051|fyeff84j1051|fyeff85j1051|fyeff86j1051|fyeff87j1051|fyeff88j0422|fyeff89j0422|fyeff90j0422|fyeff90j0434|fyeff90j0440|fyeff91j0422|fyeff91j0434|fyeff91j0440|fyeff92j0440|fyeff93j0440|fyeff93j1045|fyeff93j1067|fyeff94j0392|fyeff94j0440|fyeff94j0443|fyeff94j1045|fyeff94j1067|fyeff95j0219|fyeff95j0392|fyeff95j0439|fyeff95j0440|fyeff95j0443|fyeff96j0053|fyeff96j0219|fyeff96j0392|fyeff96j0429|fyeff96j0434|fyeff96j0950|fyeff96j1019|fyeff96j1028|fyeff97j0053|fyeff97j0178|fyeff97j0191|fyeff97j0219|fyeff97j0221|fyeff97j0258|fyeff97j0324|fyeff97j0355|fyeff97j0370|fyeff97j0377|fyeff97j0392|fyeff97j0429|fyeff97j0434|fyeff97j0950|fyeff97j1019|fyeff98j0053|fyeff98j0065|fyeff98j0101|fyeff98j0144|fyeff98j0156|fyeff98j0178|fyeff98j0191|fyeff98j0193|fyeff98j0196|fyeff98j0197|fyeff98j0209|fyeff98j0210|fyeff98j0211|fyeff98j0214|fyeff98j0215|fyeff98j0218|fyeff98j0219|fyeff98j0221|fyeff98j0258|fyeff98j0260|fyeff98j0279|fyeff98j0284|fyeff98j0295|fyeff98j0296|fyeff98j0298|fyeff98j0324|fyeff98j0355|fyeff98j0370|fyeff98j0376|fyeff98j0379|fyeff98j0381|fyeff98j0392|fyeff98j0401|fyeff98j0404|fyeff98j0405|fyeff98j0407|fyeff98j041
1|fyeff98j0418|fyeff98j0421|fyeff98j0423|fyeff98j0433|fyeff98j0436|fyeff98j0673|fyeff98j0896|fyeff98j0950|fyeff98j0985|fyeff98j1012|fyeff99j0053|fyeff99j0065|fyeff99j0152|fyeff99j0156|fyeff99j0159|fyeff99j0178|fyeff99j0191|fyeff99j0193|fyeff99j0196|fyeff99j0197|fyeff99j0209|fyeff99j0210|fyeff99j0211|fyeff99j0214|fyeff99j0215|fyeff99j0218|fyeff99j0219|fyeff99j0220|fyeff99j0221|fyeff99j0260|fyeff99j0279|fyeff99j0284|fyeff99j0291|fyeff99j0295|fyeff99j0296|fyeff99j0297|fyeff99j0298|fyeff99j0324|fyeff99j0339|fyeff99j0355|fyeff99j0370|fyeff99j0376|fyeff99j0379|fyeff99j0381|fyeff99j0392|fyeff99j0401|fyeff99j0404|fyeff99j0405|fyeff99j0407|fyeff99j0410|fyeff99j0411|fyeff99j0413|fyeff99j0414|fyeff99j0415|fyeff99j0418|fyeff99j0421|fyeff99j0423|fyeff99j0436|fyeff99j0673|fyeff99j0896|fyeff99j0950|fyeff99j0962|fyeff99j0985|fyeff99j1010|fyeff99j1012|fyeff99j1028|fyeff99j1090|fyeff99j1370|fayfm01j0148|fayfm01j0149|fayfm01j0155|fayfm02j0148|fayfm02j0149|fayfm02j0155|faefj00j0594|faefj00j0595|faefj00j0596|faefj00j0597|faefj01j0707|faefj02j0707|faefj03j0707|faefj90j1023|faefj91j1023|faefj92j1023|faefj94j1056|faefj95j1023|faefj95j1056|faefj96j1056|faefj98j1038|faefj99j1078|fdeff99j9001|fdeff99j9002|gyefj99j0005", // A long case insensitive alternation. "(?i:(zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb))", + "(?i:(AAAAAAAAAAAAAAAAAAAAAAAA|BBBBBBBBBBBBBBBBBBBBBBBB|cccccccccccccccccccccccC|ſſſſſſſſſſſſſſſſſſſſſſſſS|SSSSSSSSSSSSSSSSSSSSSSSSſ))", + // A short case insensitive alternation where each entry ends with ".*". + "(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*))", // A long case insensitive alternation where each entry ends with ".*". 
"(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*|qbvKMimS.*|IecrXtPa.*|seTckYqt.*|NxnyHkgB.*|fIDlOgKb.*|UhlWIygH.*|OtNoJxHG.*|cUTkFVIV.*|mTgFIHjr.*|jQkoIDtE.*|PPMKxRXl.*|AwMfwVkQ.*|CQyMrTQJ.*|BzrqxVSi.*|nTpcWuhF.*|PertdywG.*|ZZDgCtXN.*|WWdDPyyE.*|uVtNQsKk.*|BdeCHvPZ.*|wshRnFlH.*|aOUIitIp.*|RxZeCdXT.*|CFZMslCj.*|AVBZRDxl.*|IzIGCnhw.*|ythYuWiz.*|oztXVXhl.*|VbLkwqQx.*|qvaUgyVC.*|VawUjPWC.*|ecloYJuj.*|boCLTdSU.*|uPrKeAZx.*|hrMWLWBq.*|JOnUNHRM.*|rYnujkPq.*|dDEdZhIj.*|DRrfvugG.*|yEGfDxVV.*|YMYdJWuP.*|PHUQZNWM.*|AmKNrLis.*|zTxndVfn.*|FPsHoJnc.*|EIulZTua.*|KlAPhdzg.*|ScHJJCLt.*|NtTfMzME.*|eMCwuFdo.*|SEpJVJbR.*|cdhXZeCx.*|sAVtBwRh.*|kVFEVcMI.*|jzJrxraA.*|tGLHTell.*|NNWoeSaw.*|DcOKSetX.*|UXZAJyka.*|THpMphDP.*|rizheevl.*|kDCBRidd.*|pCZZRqyu.*|pSygkitl.*|SwZGkAaW.*|wILOrfNX.*|QkwVOerj.*|kHOMxPDr.*|EwOVycJv.*|AJvtzQFS.*|yEOjKYYB.*|LizIINLL.*|JBRSsfcG.*|YPiUqqNl.*|IsdEbvee.*|MjEpGcBm.*|OxXZVgEQ.*|xClXGuxa.*|UzRCGFEb.*|buJbvfvA.*|IPZQxRet.*|oFYShsMc.*|oBHffuHO.*|bzzKrcBR.*|KAjzrGCl.*|IPUsAVls.*|OGMUMbIU.*|gyDccHuR.*|bjlalnDd.*|ZLWjeMna.*|fdsuIlxQ.*|dVXtiomV.*|XxedTjNg.*|XWMHlNoA.*|nnyqArQX.*|opfkWGhb.*|wYtnhdYb.*))", // A long case insensitive alternation where each entry starts with ".*". @@ -79,15 +84,27 @@ var ( ".*foo.?", ".?foo.+", "foo.?|bar", + "ſſs", + // Concat of literals and wildcards. + ".*-.*-.*-.*-.*", + "(.+)-(.+)-(.+)-(.+)-(.+)", + "((.*))(?i:f)((.*))o((.*))o((.*))", + "((.*))f((.*))(?i:o)((.*))o((.*))", } values = []string{ "foo", " foo bar", "bar", "buzz\nbar", "bar foo", "bfoo", "\n", "\nfoo", "foo\n", "hello foo world", "hello foo\n world", "", - "FOO", "Foo", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo", + "FOO", "Foo", "fOo", "foO", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo", "10.0.1.20", "10.0.2.10", "10.0.3.30", "10.0.4.40", - "foofoo0", "foofoo", + "foofoo0", "foofoo", "😀foo0", "ſſs", "ſſS", "AAAAAAAAAAAAAAAAAAAAAAAA", "BBBBBBBBBBBBBBBBBBBBBBBB", "cccccccccccccccccccccccC", "ſſſſſſſſſſſſſſſſſſſſſſſſS", "SSSSSSSSSSSSSSSSSSSSSSSSſ", // Values matching / not matching the test regexps on long alternations. 
"zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX", + + // Invalid utf8 + "\xfefoo", + "foo\xfe", + "\xfd", + "\xff\xff", } ) @@ -124,29 +141,29 @@ func TestOptimizeConcatRegex(t *testing.T) { regex string prefix string suffix string - contains string + contains []string }{ - {regex: "foo(hello|bar)", prefix: "foo", suffix: "", contains: ""}, - {regex: "foo(hello|bar)world", prefix: "foo", suffix: "world", contains: ""}, - {regex: "foo.*", prefix: "foo", suffix: "", contains: ""}, - {regex: "foo.*hello.*bar", prefix: "foo", suffix: "bar", contains: "hello"}, - {regex: ".*foo", prefix: "", suffix: "foo", contains: ""}, - {regex: "^.*foo$", prefix: "", suffix: "foo", contains: ""}, - {regex: ".*foo.*", prefix: "", suffix: "", contains: "foo"}, - {regex: ".*foo.*bar.*", prefix: "", suffix: "", contains: "foo"}, - {regex: ".*(foo|bar).*", prefix: "", suffix: "", contains: ""}, - {regex: ".*[abc].*", prefix: "", suffix: "", contains: ""}, - {regex: ".*((?i)abc).*", prefix: "", suffix: "", contains: ""}, - {regex: ".*(?i:abc).*", prefix: "", suffix: "", contains: ""}, - {regex: "(?i:abc).*", prefix: "", suffix: "", contains: ""}, - {regex: ".*(?i:abc)", prefix: "", suffix: "", contains: ""}, - {regex: ".*(?i:abc)def.*", prefix: "", suffix: "", contains: "def"}, - {regex: "(?i).*(?-i:abc)def", prefix: "", suffix: "", contains: "abc"}, - {regex: ".*(?msU:abc).*", prefix: "", suffix: "", contains: "abc"}, - {regex: "[aA]bc.*", prefix: "", suffix: "", contains: "bc"}, - {regex: "^5..$", prefix: "5", suffix: "", contains: ""}, - {regex: "^release.*", prefix: "release", suffix: "", contains: ""}, - {regex: "^env-[0-9]+laio[1]?[^0-9].*", prefix: "env-", suffix: "", contains: "laio"}, + {regex: "foo(hello|bar)", prefix: "foo", suffix: "", contains: nil}, + {regex: "foo(hello|bar)world", prefix: "foo", suffix: "world", contains: nil}, + {regex: "foo.*", prefix: "foo", suffix: "", contains: nil}, + {regex: "foo.*hello.*bar", prefix: "foo", suffix: "bar", contains: []string{"hello"}}, + {regex: ".*foo", prefix: "", suffix: "foo", contains: nil}, + {regex: "^.*foo$", prefix: "", suffix: "foo", contains: nil}, + {regex: ".*foo.*", prefix: "", suffix: "", contains: []string{"foo"}}, + {regex: ".*foo.*bar.*", prefix: "", suffix: "", contains: []string{"foo", "bar"}}, + {regex: ".*(foo|bar).*", prefix: "", suffix: "", contains: nil}, + {regex: ".*[abc].*", prefix: "", suffix: "", contains: nil}, + {regex: ".*((?i)abc).*", prefix: "", suffix: "", contains: nil}, + {regex: ".*(?i:abc).*", prefix: "", suffix: "", contains: nil}, + {regex: "(?i:abc).*", prefix: "", suffix: "", contains: nil}, + {regex: ".*(?i:abc)", prefix: "", suffix: "", contains: nil}, + {regex: ".*(?i:abc)def.*", prefix: "", suffix: "", contains: []string{"def"}}, + {regex: "(?i).*(?-i:abc)def", prefix: "", suffix: "", contains: []string{"abc"}}, + {regex: ".*(?msU:abc).*", prefix: "", suffix: "", contains: []string{"abc"}}, + {regex: "[aA]bc.*", prefix: "", suffix: "", contains: []string{"bc"}}, + {regex: "^5..$", prefix: "5", suffix: "", contains: nil}, + {regex: "^release.*", prefix: "release", suffix: "", contains: nil}, + {regex: "^env-[0-9]+laio[1]?[^0-9].*", prefix: "env-", suffix: "", contains: []string{"laio"}}, } for _, c := range cases { @@ -280,6 +297,52 @@ func BenchmarkFastRegexMatcher(b *testing.B) { } } +func BenchmarkToNormalizedLower(b *testing.B) { + benchCase := func(l int, uppercase string, asciiOnly bool, alt int) string { + chars := 
"abcdefghijklmnopqrstuvwxyz" + if !asciiOnly { + chars = "aаbбcвdгeдfеgёhжiзjиkйlкmлnмoнpоqпrрsсtтuуvфwхxцyчzш" + } + // Swap the alphabet to make alternatives. + chars = chars[alt%len(chars):] + chars[:alt%len(chars)] + + str := strings.Repeat(chars, l/len(chars)+1)[:l] + switch uppercase { + case "first": + return strings.ToUpper(str[:1]) + str[1:] + case "last": + return str[:len(str)-1] + strings.ToUpper(str[len(str)-1:]) + case "all": + return strings.ToUpper(str) + case "none": + return str + default: + panic("invalid uppercase") + } + } + + for _, l := range []int{10, 100, 1000, 4000} { + b.Run(fmt.Sprintf("length=%d", l), func(b *testing.B) { + for _, uppercase := range []string{"none", "first", "last", "all"} { + b.Run("uppercase="+uppercase, func(b *testing.B) { + for _, asciiOnly := range []bool{true, false} { + b.Run(fmt.Sprintf("ascii=%t", asciiOnly), func(b *testing.B) { + inputs := make([]string, 10) + for i := range inputs { + inputs[i] = benchCase(l, uppercase, asciiOnly, i) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + toNormalisedLower(inputs[n%len(inputs)]) + } + }) + } + }) + } + }) + } +} + func TestStringMatcherFromRegexp(t *testing.T) { for _, c := range []struct { pattern string @@ -315,7 +378,7 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"10\\.0\\.(1|2)\\.+", nil}, {"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, {"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}}, - {"foo-.*$", &literalPrefixStringMatcher{prefix: "foo-", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}, + {"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}}, {"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}}, {"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})}, {"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})}, @@ -330,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) { {".*foo.*bar.*", nil}, {`\d*`, nil}, {".", nil}, - {"/|/bar.*", &literalPrefixStringMatcher{prefix: "/", prefixCaseSensitive: true, right: orStringMatcher{emptyStringMatcher{}, &literalPrefixStringMatcher{prefix: "bar", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}}, + {"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}}, // This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat. // It would make the code too complex to handle it. {"(.+)/(foo.*|bar$)", nil}, // Case sensitive alternate with same literal prefix and .* suffix. 
- {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "xyz-016a-ixb-", prefixCaseSensitive: true, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "dp", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "op", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}}, + {"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}}, // Case insensitive alternate with same literal prefix and .* suffix. - {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}}, - {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, + {"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}}, // Concatenated variable length selectors are not supported. {"foo.*.*", nil}, {"foo.+.+", nil}, @@ -347,9 +410,9 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"aaa.?.?", nil}, {"aaa.?.*", nil}, // Regexps with ".?". 
- {"ext.?|xfs", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, - {"foo.?", &literalPrefixStringMatcher{prefix: "foo", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, + {"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}}, + {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}}, {"f.?o", nil}, } { c := c @@ -419,10 +482,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) { re := regexp.MustCompile("^" + c.pattern + "$") - // Pre-condition check: ensure it contains literalPrefixStringMatcher. + // Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher. numPrefixMatchers := 0 visitStringMatcher(matcher, func(matcher StringMatcher) { - if _, ok := matcher.(*literalPrefixStringMatcher); ok { + if _, ok := matcher.(*literalPrefixSensitiveStringMatcher); ok { + numPrefixMatchers++ + } + if _, ok := matcher.(*literalPrefixInsensitiveStringMatcher); ok { numPrefixMatchers++ } }) @@ -622,7 +688,15 @@ func randStrings(randGenerator *rand.Rand, many, length int) []string { return out } -func TestOptimizeEqualStringMatchers(t *testing.T) { +func randStringsWithSuffix(randGenerator *rand.Rand, many, length int, suffix string) []string { + out := randStrings(randGenerator, many, length) + for i := range out { + out[i] += suffix + } + return out +} + +func TestOptimizeEqualOrPrefixStringMatchers(t *testing.T) { tests := map[string]struct { input StringMatcher expectedValues []string @@ -703,7 +777,7 @@ func TestOptimizeEqualStringMatchers(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - actualMatcher := optimizeEqualStringMatchers(testData.input, 0) + actualMatcher := optimizeEqualOrPrefixStringMatchers(testData.input, 0) if testData.expectedValues == nil { require.IsType(t, testData.input, actualMatcher) @@ -718,10 +792,12 @@ func TestOptimizeEqualStringMatchers(t *testing.T) { func TestNewEqualMultiStringMatcher(t *testing.T) { tests := map[string]struct { - values []string - caseSensitive bool - expectedValuesMap map[string]struct{} - expectedValuesList []string + values []string + caseSensitivePrefixes []*literalPrefixSensitiveStringMatcher + caseSensitive bool + expectedValuesMap map[string]struct{} + expectedPrefixesMap map[string][]StringMatcher + expectedValuesList []string }{ "few case sensitive values": { values: []string{"a", "B"}, @@ -733,27 +809,47 @@ func TestNewEqualMultiStringMatcher(t *testing.T) { caseSensitive: false, expectedValuesList: []string{"a", "B"}, }, + "few case sensitive values and prefixes": { + values: []string{"a"}, + caseSensitivePrefixes: []*literalPrefixSensitiveStringMatcher{{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}, + caseSensitive: 
true, + expectedValuesMap: map[string]struct{}{"a": {}}, + expectedPrefixesMap: map[string][]StringMatcher{"B": {&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}}, + }, "many case sensitive values": { - values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, - caseSensitive: true, - expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}}, + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, + caseSensitive: true, + expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}}, + expectedPrefixesMap: map[string][]StringMatcher{}, }, "many case insensitive values": { - values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, - caseSensitive: false, - expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}}, + values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, + caseSensitive: false, + expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}}, + expectedPrefixesMap: map[string][]StringMatcher{}, }, } for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values)) + // To keep this test simple, we always assume a min prefix length of 1. 
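+ // (With a min prefix length of 1 the map matcher buckets each prefix under its first character, which is why expectedPrefixesMap above is keyed by "B"; when no prefixes are added the value is irrelevant and 0 is passed.)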
+ minPrefixLength := 0 + if len(testData.caseSensitivePrefixes) > 0 { + minPrefixLength = 1 + } + + matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.caseSensitivePrefixes), minPrefixLength) for _, v := range testData.values { matcher.add(v) } - if testData.expectedValuesMap != nil { + for _, p := range testData.caseSensitivePrefixes { + matcher.addPrefix(p.prefix, true, p) + } + + if testData.expectedValuesMap != nil || testData.expectedPrefixesMap != nil { require.IsType(t, &equalMultiStringMapMatcher{}, matcher) require.Equal(t, testData.expectedValuesMap, matcher.(*equalMultiStringMapMatcher).values) + require.Equal(t, testData.expectedPrefixesMap, matcher.(*equalMultiStringMapMatcher).prefixes) require.Equal(t, testData.caseSensitive, matcher.(*equalMultiStringMapMatcher).caseSensitive) } if testData.expectedValuesList != nil { @@ -765,9 +861,32 @@ func TestNewEqualMultiStringMatcher(t *testing.T) { } } +func TestEqualMultiStringMapMatcher_addPrefix(t *testing.T) { + t.Run("should panic if the matcher is case sensitive but the prefix is not case sensitive", func(t *testing.T) { + matcher := newEqualMultiStringMatcher(true, 0, 1, 1) + + require.Panics(t, func() { + matcher.addPrefix("a", false, &literalPrefixInsensitiveStringMatcher{ + prefix: "a", + }) + }) + }) + + t.Run("should panic if the matcher is not case sensitive but the prefix is case sensitive", func(t *testing.T) { + matcher := newEqualMultiStringMatcher(false, 0, 1, 1) + + require.Panics(t, func() { + matcher.addPrefix("a", true, &literalPrefixSensitiveStringMatcher{ + prefix: "a", + }) + }) + }) +} + func TestEqualMultiStringMatcher_Matches(t *testing.T) { tests := map[string]struct { values []string + prefixes []StringMatcher caseSensitive bool expectedMatches []string expectedNotMatches []string @@ -784,6 +903,24 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) { expectedMatches: []string{"a", "A", "b", "B"}, expectedNotMatches: []string{"c", "C"}, }, + "few case sensitive prefixes": { + prefixes: []StringMatcher{ + &literalPrefixSensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}}, + &literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}, + }, + caseSensitive: true, + expectedMatches: []string{"a", "aX", "B", "BX"}, + expectedNotMatches: []string{"A", "b"}, + }, + "few case insensitive prefixes": { + prefixes: []StringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}}, + &literalPrefixInsensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}, + }, + caseSensitive: false, + expectedMatches: []string{"a", "aX", "A", "AX", "b", "bX", "B", "BX"}, + expectedNotMatches: []string{"c", "cX", "C", "CX"}, + }, "many case sensitive values": { values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"}, caseSensitive: true, @@ -796,14 +933,37 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) { expectedMatches: []string{"a", "A", "b", "B"}, expectedNotMatches: []string{"x", "X"}, }, + "mixed values and prefixes": { + values: []string{"a"}, + prefixes: []StringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}, + caseSensitive: true, + expectedMatches: []string{"a", "B", "BX"}, + expectedNotMatches: []string{"aX", "A", "b", "bX"}, + }, } for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - matcher := 
newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values)) + // To keep this test simple, we always assume a min prefix length of 1. + minPrefixLength := 0 + if len(testData.prefixes) > 0 { + minPrefixLength = 1 + } + + matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.prefixes), minPrefixLength) for _, v := range testData.values { matcher.add(v) } + for _, p := range testData.prefixes { + switch m := p.(type) { + case *literalPrefixSensitiveStringMatcher: + matcher.addPrefix(m.prefix, true, p) + case *literalPrefixInsensitiveStringMatcher: + matcher.addPrefix(m.prefix, false, p) + default: + panic("Unexpected type in test case") + } + } for _, v := range testData.expectedMatches { require.True(t, matcher.Matches(v), "value: %s", v) @@ -815,29 +975,33 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) { } } -func TestFindEqualStringMatchers(t *testing.T) { +func TestFindEqualOrPrefixStringMatchers(t *testing.T) { type match struct { s string caseSensitive bool } - // Utility to call findEqualStringMatchers() and collect all callback invocations. - findEqualStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) { - ok = findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool { + // Utility to call findEqualOrPrefixStringMatchers() and collect all callback invocations. + findEqualOrPrefixStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) { + ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { matches = append(matches, match{matcher.s, matcher.caseSensitive}) return true + }, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool { + matches = append(matches, match{prefix, prefixCaseSensitive}) + return true }) + return } t.Run("empty matcher", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(emptyStringMatcher{}) + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(emptyStringMatcher{}) require.False(t, actualOk) require.Empty(t, actualMatches) }) t.Run("concat of literal matchers (case sensitive)", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches( + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( orStringMatcher{ &equalStringMatcher{s: "test-1", caseSensitive: true}, &equalStringMatcher{s: "test-2", caseSensitive: true}, @@ -849,7 +1013,7 @@ func TestFindEqualStringMatchers(t *testing.T) { }) t.Run("concat of literal matchers (case insensitive)", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches( + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( orStringMatcher{ &equalStringMatcher{s: "test-1", caseSensitive: false}, &equalStringMatcher{s: "test-2", caseSensitive: false}, @@ -861,7 +1025,7 @@ func TestFindEqualStringMatchers(t *testing.T) { }) t.Run("concat of literal matchers (mixed case)", func(t *testing.T) { - actualMatches, actualOk := findEqualStringMatchersAndCollectMatches( + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( orStringMatcher{ &equalStringMatcher{s: "test-1", caseSensitive: false}, &equalStringMatcher{s: "test-2", caseSensitive: true}, @@ -871,11 +1035,59 @@ func TestFindEqualStringMatchers(t *testing.T) { require.True(t, actualOk) require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches) }) + + t.Run("concat of literal 
prefix matchers (case sensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixSensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal prefix matchers (case insensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixInsensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches) + }) + + t.Run("concat of literal prefix matchers (mixed case)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &literalPrefixInsensitiveStringMatcher{prefix: "test-1"}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches) + }) + + t.Run("concat of literal string and prefix matchers (case sensitive)", func(t *testing.T) { + actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches( + orStringMatcher{ + &equalStringMatcher{s: "test-1", caseSensitive: true}, + &literalPrefixSensitiveStringMatcher{prefix: "test-2"}, + }, + ) + + require.True(t, actualOk) + require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches) + }) } // This benchmark is used to find a good threshold to use to apply the optimization -// done by optimizeEqualStringMatchers(). -func BenchmarkOptimizeEqualStringMatchers(b *testing.B) { +// done by optimizeEqualOrPrefixStringMatchers(). +func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) { randGenerator := rand.New(rand.NewSource(time.Now().UnixNano())) // Generate variable lengths random texts to match against. @@ -885,76 +1097,151 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) { for numAlternations := 2; numAlternations <= 256; numAlternations *= 2 { for _, caseSensitive := range []bool{true, false} { - b.Run(fmt.Sprintf("alternations: %d case sensitive: %t", numAlternations, caseSensitive), func(b *testing.B) { - // Generate a regex with the expected number of alternations. - re := strings.Join(randStrings(randGenerator, numAlternations, 10), "|") - if !caseSensitive { - re = "(?i:(" + re + "))" - } - - parsed, err := syntax.Parse(re, syntax.Perl) - require.NoError(b, err) - - unoptimized := stringMatcherFromRegexpInternal(parsed) - require.IsType(b, orStringMatcher{}, unoptimized) - - optimized := optimizeEqualStringMatchers(unoptimized, 0) - if numAlternations < minEqualMultiStringMatcherMapThreshold { - require.IsType(b, &equalMultiStringSliceMatcher{}, optimized) - } else { - require.IsType(b, &equalMultiStringMapMatcher{}, optimized) - } - - b.Run("without optimizeEqualStringMatchers()", func(b *testing.B) { - for n := 0; n < b.N; n++ { - for _, t := range texts { - unoptimized.Matches(t) - } + for _, prefixMatcher := range []bool{true, false} { + b.Run(fmt.Sprintf("alternations: %d case sensitive: %t prefix matcher: %t", numAlternations, caseSensitive, prefixMatcher), func(b *testing.B) { + // If the test should run on prefix matchers, we add a wildcard matcher as suffix (prefix will be a literal). 
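+ // For instance, a generated entry like "AbCdEfGhIj" becomes "AbCdEfGhIj.*", which stringMatcherFromRegexp turns into a literal-prefix matcher instead of an equality matcher.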
+ suffix := "" + if prefixMatcher { + suffix = ".*" } - }) - b.Run("with optimizeEqualStringMatchers()", func(b *testing.B) { - for n := 0; n < b.N; n++ { - for _, t := range texts { - optimized.Matches(t) - } + // Generate a regex with the expected number of alternations. + re := strings.Join(randStringsWithSuffix(randGenerator, numAlternations, 10, suffix), "|") + if !caseSensitive { + re = "(?i:(" + re + "))" } + b.Logf("regexp: %s", re) + + parsed, err := syntax.Parse(re, syntax.Perl) + require.NoError(b, err) + + unoptimized := stringMatcherFromRegexpInternal(parsed) + require.IsType(b, orStringMatcher{}, unoptimized) + + optimized := optimizeEqualOrPrefixStringMatchers(unoptimized, 0) + if numAlternations < minEqualMultiStringMatcherMapThreshold && !prefixMatcher { + require.IsType(b, &equalMultiStringSliceMatcher{}, optimized) + } else { + require.IsType(b, &equalMultiStringMapMatcher{}, optimized) + } + + b.Run("without optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) { + for n := 0; n < b.N; n++ { + for _, t := range texts { + unoptimized.Matches(t) + } + } + }) + + b.Run("with optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) { + for n := 0; n < b.N; n++ { + for _, t := range texts { + optimized.Matches(t) + } + } + }) }) - }) + } } } } func TestZeroOrOneCharacterStringMatcher(t *testing.T) { - matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} - require.True(t, matcher.Matches("")) - require.True(t, matcher.Matches("x")) - require.True(t, matcher.Matches("\n")) - require.False(t, matcher.Matches("xx")) - require.False(t, matcher.Matches("\n\n")) + t.Run("match newline", func(t *testing.T) { + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + require.True(t, matcher.Matches("")) + require.True(t, matcher.Matches("x")) + require.True(t, matcher.Matches("\n")) + require.False(t, matcher.Matches("xx")) + require.False(t, matcher.Matches("\n\n")) + }) - matcher = &zeroOrOneCharacterStringMatcher{matchNL: false} - require.True(t, matcher.Matches("")) - require.True(t, matcher.Matches("x")) - require.False(t, matcher.Matches("\n")) - require.False(t, matcher.Matches("xx")) - require.False(t, matcher.Matches("\n\n")) + t.Run("do not match newline", func(t *testing.T) { + matcher := &zeroOrOneCharacterStringMatcher{matchNL: false} + require.True(t, matcher.Matches("")) + require.True(t, matcher.Matches("x")) + require.False(t, matcher.Matches("\n")) + require.False(t, matcher.Matches("xx")) + require.False(t, matcher.Matches("\n\n")) + }) + + t.Run("unicode", func(t *testing.T) { + // Just for documentation purposes, emoji1 is 1 rune, emoji2 is 2 runes. + // Having this in mind, will make future readers fixing tests easier. + emoji1 := "😀" + emoji2 := "❤️" + require.Equal(t, 1, utf8.RuneCountInString(emoji1)) + require.Equal(t, 2, utf8.RuneCountInString(emoji2)) + + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + require.True(t, matcher.Matches(emoji1)) + require.False(t, matcher.Matches(emoji2)) + require.False(t, matcher.Matches(emoji1+emoji1)) + require.False(t, matcher.Matches("x"+emoji1)) + require.False(t, matcher.Matches(emoji1+"x")) + require.False(t, matcher.Matches(emoji1+emoji2)) + }) + + t.Run("invalid unicode", func(t *testing.T) { + // Just for reference, we also compare to what `^.?$` regular expression matches. 
+ re := regexp.MustCompile("^.?$") + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + + requireMatches := func(s string, expected bool) { + t.Helper() + require.Equal(t, expected, matcher.Matches(s)) + require.Equal(t, re.MatchString(s), matcher.Matches(s)) + } + + requireMatches("\xff", true) + requireMatches("x\xff", false) + requireMatches("\xffx", false) + requireMatches("\xff\xfe", false) + }) } -func TestLiteralPrefixStringMatcher(t *testing.T) { - m := &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &emptyStringMatcher{}} +func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) { + type benchCase struct { + str string + matches bool + } + + emoji1 := "😀" + emoji2 := "❤️" + cases := []benchCase{ + {"", true}, + {"x", true}, + {"\n", true}, + {"xx", false}, + {"\n\n", false}, + {emoji1, true}, + {emoji2, false}, + {emoji1 + emoji1, false}, + {strings.Repeat("x", 100), false}, + {strings.Repeat(emoji1, 100), false}, + {strings.Repeat(emoji2, 100), false}, + } + + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + b.ResetTimer() + + for n := 0; n < b.N; n++ { + c := cases[n%len(cases)] + got := matcher.Matches(c.str) + if got != c.matches { + b.Fatalf("unexpected result for %q: got %t, want %t", c.str, got, c.matches) + } + } +} + +func TestLiteralPrefixSensitiveStringMatcher(t *testing.T) { + m := &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}} require.True(t, m.Matches("mar")) require.False(t, m.Matches("marco")) require.False(t, m.Matches("ma")) require.False(t, m.Matches("mAr")) - m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: false, right: &emptyStringMatcher{}} - require.True(t, m.Matches("mar")) - require.False(t, m.Matches("marco")) - require.False(t, m.Matches("ma")) - require.True(t, m.Matches("mAr")) - - m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &equalStringMatcher{s: "co", caseSensitive: false}} + m = &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &equalStringMatcher{s: "co", caseSensitive: false}} require.True(t, m.Matches("marco")) require.True(t, m.Matches("marCO")) require.False(t, m.Matches("MARco")) @@ -962,6 +1249,14 @@ func TestLiteralPrefixStringMatcher(t *testing.T) { require.False(t, m.Matches("marcopracucci")) } +func TestLiteralPrefixInsensitiveStringMatcher(t *testing.T) { + m := &literalPrefixInsensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}} + require.True(t, m.Matches("mar")) + require.False(t, m.Matches("marco")) + require.False(t, m.Matches("ma")) + require.True(t, m.Matches("mAr")) +} + func TestLiteralSuffixStringMatcher(t *testing.T) { m := &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: true} require.True(t, m.Matches("co")) @@ -1009,6 +1304,15 @@ func TestHasSuffixCaseInsensitive(t *testing.T) { require.False(t, hasSuffixCaseInsensitive("marco", "abcdefghi")) } +func TestContainsInOrder(t *testing.T) { + require.True(t, containsInOrder("abcdefghilmno", []string{"ab", "cd", "no"})) + require.True(t, containsInOrder("abcdefghilmno", []string{"def", "hil"})) + + require.False(t, containsInOrder("abcdefghilmno", []string{"ac"})) + require.False(t, containsInOrder("abcdefghilmno", []string{"ab", "cd", "de"})) + require.False(t, containsInOrder("abcdefghilmno", []string{"cd", "ab"})) +} + func getTestNameFromRegexp(re string) string { if len(re) > 32 { return re[:32] @@ -1042,7 +1346,10 @@ func visitStringMatcher(matcher StringMatcher, 
callback func(matcher StringMatch visitStringMatcher(casted.right, callback) } - case *literalPrefixStringMatcher: + case *literalPrefixSensitiveStringMatcher: + visitStringMatcher(casted.right, callback) + + case *literalPrefixInsensitiveStringMatcher: visitStringMatcher(casted.right, callback) case *literalSuffixStringMatcher: @@ -1054,12 +1361,35 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch } // No nested matchers for the following ones. + case *equalMultiStringMapMatcher: + for _, prefixes := range casted.prefixes { + for _, matcher := range prefixes { + visitStringMatcher(matcher, callback) + } + } + case emptyStringMatcher: case *equalStringMatcher: case *equalMultiStringSliceMatcher: - case *equalMultiStringMapMatcher: case anyStringWithoutNewlineMatcher: case *anyNonEmptyStringMatcher: case trueMatcher: } } + +func TestToNormalisedLower(t *testing.T) { + testCases := map[string]string{ + "foo": "foo", + "FOO": "foo", + "Foo": "foo", + "foO": "foo", + "fOo": "foo", + "AAAAAAAAAAAAAAAAAAAAAAAA": "aaaaaaaaaaaaaaaaaaaaaaaa", + "cccccccccccccccccccccccC": "cccccccccccccccccccccccc", + "ſſſſſſſſſſſſſſſſſſſſſſſſS": "sssssssssssssssssssssssss", + "ſſAſſa": "ssassa", + } + for input, expectedOutput := range testCases { + require.Equal(t, expectedOutput, toNormalisedLower(input)) + } +} diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index d29c3d07ae..4f33edda43 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -17,6 +17,7 @@ import ( "crypto/md5" "encoding/binary" "fmt" + "strconv" "strings" "github.com/grafana/regexp" @@ -48,7 +49,7 @@ const ( Drop Action = "drop" // KeepEqual drops targets for which the input does not match the target. KeepEqual Action = "keepequal" - // Drop drops targets for which the input does match the target. + // DropEqual drops targets for which the input does match the target. DropEqual Action = "dropequal" // HashMod sets a label to the modulus of a hash of labels. HashMod Action = "hashmod" @@ -205,6 +206,11 @@ func (re Regexp) MarshalYAML() (interface{}, error) { return nil, nil } +// IsZero implements the yaml.IsZeroer interface. +func (re Regexp) IsZero() bool { + return re.Regexp == DefaultRelabelConfig.Regex.Regexp +} + // String returns the original string used to compile the regular expression. func (re Regexp) String() string { str := re.Regexp.String() @@ -290,7 +296,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { hash := md5.Sum([]byte(val)) // Use only the last 8 bytes of the hash to give the same result as earlier versions of this code. 
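+ // For example, if the last 8 bytes decode to 0x000000000000002a (42) and cfg.Modulus is 8, the label is set to "2", since 42 % 8 == 2.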
mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus - lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) + lb.Set(cfg.TargetLabel, strconv.FormatUint(mod, 10)) case LabelMap: lb.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 6798fb02a5..0f11f7068d 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -14,7 +14,7 @@ package relabel import ( - "fmt" + "strconv" "testing" "github.com/prometheus/common/model" @@ -657,7 +657,7 @@ func TestRelabelValidate(t *testing.T) { }, } for i, test := range tests { - t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { err := test.config.Validate() if test.expected == "" { require.NoError(t, err) @@ -851,3 +851,52 @@ func BenchmarkRelabel(b *testing.B) { }) } } + +func TestConfig_UnmarshalThenMarshal(t *testing.T) { + tests := []struct { + name string + inputYaml string + }{ + { + name: "Values provided", + inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port] +separator: ; +regex: \\d+ +target_label: __meta_kubernetes_pod_container_port_number +replacement: $1 +action: replace +`, + }, + { + name: "No regex provided", + inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port] +separator: ; +target_label: __meta_kubernetes_pod_container_port_number +replacement: $1 +action: keepequal +`, + }, + { + name: "Default regex provided", + inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port] +separator: ; +regex: (.*) +target_label: __meta_kubernetes_pod_container_port_number +replacement: $1 +action: replace +`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + unmarshalled := Config{} + err := yaml.Unmarshal([]byte(test.inputYaml), &unmarshalled) + require.NoError(t, err) + + marshalled, err := yaml.Marshal(&unmarshalled) + require.NoError(t, err) + + require.Equal(t, test.inputYaml, string(marshalled)) + }) + } +} diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index 4ed1619d64..bfb85ce740 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -136,10 +136,11 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { // RuleGroup is a list of sequentially evaluated recording and alerting rules. type RuleGroup struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + QueryOffset *model.Duration `yaml:"query_offset,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` } // Rule describes an alerting or recording rule. diff --git a/notifier/notifier.go b/notifier/notifier.go index 4cf376aa05..68b0d4961e 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -110,10 +110,11 @@ type Manager struct { metrics *alertMetrics - more chan struct{} - mtx sync.RWMutex - ctx context.Context - cancel func() + more chan struct{} + mtx sync.RWMutex + + stopOnce *sync.Once + stopRequested chan struct{} alertmanagers map[string]*alertmanagerSet logger log.Logger @@ -121,9 +122,10 @@ type Manager struct { // Options are the configurable parameters of a Handler. 
type Options struct { - QueueCapacity int - ExternalLabels labels.Labels - RelabelConfigs []*relabel.Config + QueueCapacity int + DrainOnShutdown bool + ExternalLabels labels.Labels + RelabelConfigs []*relabel.Config // Used for sending HTTP requests to the Alertmanager. Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) @@ -217,8 +219,6 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp // NewManager is the manager constructor. func NewManager(o *Options, logger log.Logger) *Manager { - ctx, cancel := context.WithCancel(context.Background()) - if o.Do == nil { o.Do = do } @@ -227,12 +227,12 @@ func NewManager(o *Options, logger log.Logger) *Manager { } n := &Manager{ - queue: make([]*Alert, 0, o.QueueCapacity), - ctx: ctx, - cancel: cancel, - more: make(chan struct{}, 1), - opts: o, - logger: logger, + queue: make([]*Alert, 0, o.QueueCapacity), + more: make(chan struct{}, 1), + stopRequested: make(chan struct{}), + stopOnce: &sync.Once{}, + opts: o, + logger: logger, } queueLenFunc := func() float64 { return float64(n.queueLen()) } @@ -298,38 +298,100 @@ func (n *Manager) nextBatch() []*Alert { return alerts } -// Run dispatches notifications continuously. +// Run dispatches notifications continuously, returning once Stop has been called and all +// pending notifications have been drained from the queue (if draining is enabled). +// +// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other. +// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details. func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + n.targetUpdateLoop(tsets) + }() + + go func() { + defer wg.Done() + n.sendLoop() + n.drainQueue() + }() + + wg.Wait() + level.Info(n.logger).Log("msg", "Notification manager stopped") +} + +// sendLoop continuously consumes the notifications queue and sends alerts to +// the configured Alertmanagers. +func (n *Manager) sendLoop() { for { - // The select is split in two parts, such as we will first try to read - // new alertmanager targets if they are available, before sending new - // alerts. + // If we've been asked to stop, that takes priority over sending any further notifications. select { - case <-n.ctx.Done(): + case <-n.stopRequested: return - case ts := <-tsets: - n.reload(ts) default: select { - case <-n.ctx.Done(): + case <-n.stopRequested: + return + + case <-n.more: + n.sendOneBatch() + + // If the queue still has items left, kick off the next iteration. + if n.queueLen() > 0 { + n.setMore() + } + } + } + } +} + +// targetUpdateLoop receives updates of target groups and triggers a reload. +func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) { + for { + // If we've been asked to stop, that takes priority over processing any further target group updates. + select { + case <-n.stopRequested: + return + default: + select { + case <-n.stopRequested: return case ts := <-tsets: n.reload(ts) - case <-n.more: } } - alerts := n.nextBatch() - - if !n.sendAll(alerts...) { - n.metrics.dropped.Add(float64(len(alerts))) - } - // If the queue still has items left, kick off the next iteration. - if n.queueLen() > 0 { - n.setMore() - } } } +func (n *Manager) sendOneBatch() { + alerts := n.nextBatch() + + if !n.sendAll(alerts...) 
{ + n.metrics.dropped.Add(float64(len(alerts))) + } +} + +func (n *Manager) drainQueue() { + if !n.opts.DrainOnShutdown { + if n.queueLen() > 0 { + level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) + n.metrics.dropped.Add(float64(n.queueLen())) + } + + return + } + + level.Info(n.logger).Log("msg", "Draining any remaining notifications...") + + for n.queueLen() > 0 { + n.sendOneBatch() + } + + level.Info(n.logger).Log("msg", "Remaining notifications drained") +} + func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { n.mtx.Lock() defer n.mtx.Unlock() @@ -471,10 +533,6 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { numSuccess atomic.Uint64 ) for _, ams := range amSets { - if len(ams.ams) == 0 { - continue - } - var ( payload []byte err error @@ -483,6 +541,11 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { ams.mtx.RLock() + if len(ams.ams) == 0 { + ams.mtx.RUnlock() + continue + } + if len(ams.cfg.AlertRelabelConfigs) > 0 { amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts) if len(amAlerts) == 0 { @@ -541,7 +604,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { for _, am := range ams.ams { wg.Add(1) - ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout)) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout)) defer cancel() go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { @@ -611,6 +674,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b }() // Any HTTP status 2xx is OK. + //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return fmt.Errorf("bad response status %s", resp.Status) } @@ -618,10 +682,19 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b return nil } -// Stop shuts down the notification handler. +// Stop signals the notification manager to shut down and immediately returns. +// +// Run will return once the notification manager has successfully shut down. +// +// The manager will optionally drain any queued notifications before shutting down. +// +// Stop is safe to call multiple times. func (n *Manager) Stop() { level.Info(n.logger).Log("msg", "Stopping notification manager...") - n.cancel() + + n.stopOnce.Do(func() { + close(n.stopRequested) + }) } // Alertmanager holds Alertmanager endpoint information. 
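To make the new shutdown contract above concrete, here is a minimal usage sketch. It is illustrative only: it assumes the exported API from this diff (NewManager, Options.DrainOnShutdown, Run, Send, Stop) plus the usual prometheus/prometheus imports (notifier, labels, targetgroup, go-kit log); the channel wiring is hypothetical.

	// runNotifier drives a Manager so that Stop() lets Run() drain and return.
	func runNotifier(logger log.Logger, tsets <-chan map[string][]*targetgroup.Group) {
		n := notifier.NewManager(&notifier.Options{
			QueueCapacity:   10000,
			DrainOnShutdown: true, // flush queued alerts before Run returns
		}, logger)

		done := make(chan struct{})
		go func() {
			defer close(done)
			n.Run(tsets) // blocks until Stop() is called (and the queue is drained)
		}()

		n.Send(&notifier.Alert{Labels: labels.FromStrings("alertname", "Example")})

		n.Stop() // safe to call multiple times
		<-done   // Run has returned; both internal loops have exited
	}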
diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index e7a9243bc2..2cdaa9e06d 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -26,13 +26,17 @@ import ( "testing" "time" + "github.com/go-kit/log" "github.com/prometheus/alertmanager/api/v2/models" + "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" @@ -74,7 +78,7 @@ func TestHandlerNextBatch(t *testing.T) { for i := range make([]struct{}, 2*maxBatchSize+1) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -186,10 +190,10 @@ func TestHandlerSendAll(t *testing.T) { for i := range make([]struct{}, maxBatchSize) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) expected = append(expected, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -297,23 +301,23 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) { for i := range make([]struct{}, maxBatchSize/2) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }, &Alert{ - Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)), }, ) expected1 = append(expected1, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }, &Alert{ - Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)), }, ) expected2 = append(expected2, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -502,7 +506,7 @@ func TestHandlerQueuing(t *testing.T) { var alerts []*Alert for i := range make([]struct{}, 20*maxBatchSize) { alerts = append(alerts, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -697,117 +701,319 @@ func TestLabelsToOpenAPILabelSet(t *testing.T) { require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222"))) } -// TestHangingNotifier validates that targets updates happen even when there are -// queued alerts. +// TestHangingNotifier ensures that the notifier takes into account SD changes even when there are +// queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676. +// and https://github.com/prometheus/prometheus/issues/8768. func TestHangingNotifier(t *testing.T) { - // Note: When targets are not updated in time, this test is flaky because go - // selects are not deterministic. Therefore we run 10 subtests to run into the issue. 
- for i := 0; i < 10; i++ { - t.Run(strconv.Itoa(i), func(t *testing.T) { - var ( - done = make(chan struct{}) - changed = make(chan struct{}) - syncCh = make(chan map[string][]*targetgroup.Group) - ) + const ( + batches = 100 + alertsCount = maxBatchSize * batches + ) - defer func() { - close(done) - }() + var ( + sendTimeout = 10 * time.Millisecond + sdUpdatert = sendTimeout / 2 - var calledOnce bool - // Setting up a bad server. This server hangs for 2 seconds. - badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if calledOnce { - t.Fatal("hanging server called multiple times") - } - calledOnce = true - select { - case <-done: - case <-time.After(2 * time.Second): - } - })) - badURL, err := url.Parse(badServer.URL) - require.NoError(t, err) - badAddress := badURL.Host // Used for __name__ label in targets. + done = make(chan struct{}) + ) - // Setting up a bad server. This server returns fast, signaling requests on - // by closing the changed channel. - goodServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - close(changed) - })) - goodURL, err := url.Parse(goodServer.URL) - require.NoError(t, err) - goodAddress := goodURL.Host // Used for __name__ label in targets. + defer func() { + close(done) + }() - h := NewManager( - &Options{ - QueueCapacity: 20 * maxBatchSize, - }, - nil, - ) + // Set up a faulty Alertmanager. + var faultyCalled atomic.Bool + faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + faultyCalled.Store(true) + select { + case <-done: + case <-time.After(time.Hour): + } + })) + faultyURL, err := url.Parse(faultyServer.URL) + require.NoError(t, err) - h.alertmanagers = make(map[string]*alertmanagerSet) + // Set up a functional Alertmanager. + var functionalCalled atomic.Bool + functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + functionalCalled.Store(true) + })) + functionalURL, err := url.Parse(functionalServer.URL) + require.NoError(t, err) - am1Cfg := config.DefaultAlertmanagerConfig - am1Cfg.Timeout = model.Duration(200 * time.Millisecond) + // Initialize the discovery manager + // This is relevant as the updates aren't sent continually in real life, but only each updatert. + // The old implementation of TestHangingNotifier didn't take that into acount. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + reg := prometheus.NewRegistry() + sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) + require.NoError(t, err) + sdManager := discovery.NewManager( + ctx, + log.NewNopLogger(), + reg, + sdMetrics, + discovery.Name("sd-manager"), + discovery.Updatert(sdUpdatert), + ) + go sdManager.Run() - h.alertmanagers["config-0"] = &alertmanagerSet{ - ams: []alertmanager{}, - cfg: &am1Cfg, - metrics: h.metrics, - } - go h.Run(syncCh) - defer h.Stop() + // Set up the notifier with both faulty and functional Alertmanagers. 
+ notifier := NewManager( + &Options{ + QueueCapacity: alertsCount, + }, + nil, + ) + notifier.alertmanagers = make(map[string]*alertmanagerSet) + amCfg := config.DefaultAlertmanagerConfig + amCfg.Timeout = model.Duration(sendTimeout) + notifier.alertmanagers["config-0"] = &alertmanagerSet{ + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return faultyURL.String() }, + }, + alertmanagerMock{ + urlf: func() string { return functionalURL.String() }, + }, + }, + cfg: &amCfg, + metrics: notifier.metrics, + } + go notifier.Run(sdManager.SyncCh()) + defer notifier.Stop() - var alerts []*Alert - for i := range make([]struct{}, 20*maxBatchSize) { - alerts = append(alerts, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), - }) - } + require.Len(t, notifier.Alertmanagers(), 2) - // Injecting the hanging server URL. - syncCh <- map[string][]*targetgroup.Group{ - "config-0": { - { - Targets: []model.LabelSet{ - { - model.AddressLabel: model.LabelValue(badAddress), - }, - }, - }, - }, - } - - // Queing alerts. - h.Send(alerts...) - - // Updating with a working alertmanager target. - go func() { - select { - case syncCh <- map[string][]*targetgroup.Group{ - "config-0": { - { - Targets: []model.LabelSet{ - { - model.AddressLabel: model.LabelValue(goodAddress), - }, - }, - }, - }, - }: - case <-done: - } - }() - - select { - case <-time.After(1 * time.Second): - t.Fatalf("Timeout after 1 second, targets not synced in time.") - case <-changed: - // The good server has been hit in less than 3 seconds, therefore - // targets have been updated before a second call could be made to the - // bad server. - } + // Enqueue the alerts. + var alerts []*Alert + for i := range make([]struct{}, alertsCount) { + alerts = append(alerts, &Alert{ + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } + notifier.Send(alerts...) + + // Wait for the Alertmanagers to start receiving alerts. + // 10*sdUpdatert is used as an arbitrary timeout here. + timeout := time.After(10 * sdUpdatert) +loop1: + for { + select { + case <-timeout: + t.Fatalf("Timeout waiting for the alertmanagers to be reached for the first time.") + default: + if faultyCalled.Load() && functionalCalled.Load() { + break loop1 + } + } + } + + // Request to remove the faulty Alertmanager. + c := map[string]discovery.Configs{ + "config-0": { + discovery.StaticConfig{ + &targetgroup.Group{ + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(functionalURL.Host), + }, + }, + }, + }, + }, + } + require.NoError(t, sdManager.ApplyConfig(c)) + + // The notifier should not wait until the alerts queue is empty to apply the discovery changes + // A faulty Alertmanager could cause each alert sending cycle to take up to AlertmanagerConfig.Timeout + // The queue may never be emptied, as the arrival rate could be larger than the departure rate + // It could even overflow and alerts could be dropped. + timeout = time.After(batches * sendTimeout) +loop2: + for { + select { + case <-timeout: + t.Fatalf("Timeout, the faulty alertmanager not removed on time.") + default: + // The faulty alertmanager was dropped. + if len(notifier.Alertmanagers()) == 1 { + // Prevent from TOCTOU. 
+ require.Positive(t, notifier.queueLen()) + break loop2 + } + require.Positive(t, notifier.queueLen(), "The faulty alertmanager wasn't dropped before the alerts queue was emptied.") + } + } +} + +func TestStop_DrainingDisabled(t *testing.T) { + releaseReceiver := make(chan struct{}) + receiverReceivedRequest := make(chan struct{}, 2) + alertsReceived := atomic.NewInt64(0) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Let the test know we've received a request. + receiverReceivedRequest <- struct{}{} + + var alerts []*Alert + + b, err := io.ReadAll(r.Body) + require.NoError(t, err) + + err = json.Unmarshal(b, &alerts) + require.NoError(t, err) + + alertsReceived.Add(int64(len(alerts))) + + // Wait for the test to release us. + <-releaseReceiver + + w.WriteHeader(http.StatusOK) + })) + defer func() { + server.Close() + }() + + m := NewManager( + &Options{ + QueueCapacity: 10, + DrainOnShutdown: false, + }, + nil, + ) + + m.alertmanagers = make(map[string]*alertmanagerSet) + + am1Cfg := config.DefaultAlertmanagerConfig + am1Cfg.Timeout = model.Duration(time.Second) + + m.alertmanagers["1"] = &alertmanagerSet{ + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return server.URL }, + }, + }, + cfg: &am1Cfg, + } + + notificationManagerStopped := make(chan struct{}) + + go func() { + defer close(notificationManagerStopped) + m.Run(nil) + }() + + // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later. + m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")}) + + select { + case <-receiverReceivedRequest: + // Nothing more to do. + case <-time.After(time.Second): + require.FailNow(t, "gave up waiting for receiver to receive notification of first alert") + } + + m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")}) + + // Stop the notification manager, pause to allow the shutdown to be observed, and then allow the receiver to proceed. + m.Stop() + time.Sleep(time.Second) + close(releaseReceiver) + + // Wait for the notification manager to stop and confirm only the first notification was sent. + // The second notification should be dropped. + select { + case <-notificationManagerStopped: + // Nothing more to do. + case <-time.After(time.Second): + require.FailNow(t, "gave up waiting for notification manager to stop") + } + + require.Equal(t, int64(1), alertsReceived.Load()) +} + +func TestStop_DrainingEnabled(t *testing.T) { + releaseReceiver := make(chan struct{}) + receiverReceivedRequest := make(chan struct{}, 2) + alertsReceived := atomic.NewInt64(0) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Let the test know we've received a request. + receiverReceivedRequest <- struct{}{} + + var alerts []*Alert + + b, err := io.ReadAll(r.Body) + require.NoError(t, err) + + err = json.Unmarshal(b, &alerts) + require.NoError(t, err) + + alertsReceived.Add(int64(len(alerts))) + + // Wait for the test to release us. 
+ <-releaseReceiver + + w.WriteHeader(http.StatusOK) + })) + defer func() { + server.Close() + }() + + m := NewManager( + &Options{ + QueueCapacity: 10, + DrainOnShutdown: true, + }, + nil, + ) + + m.alertmanagers = make(map[string]*alertmanagerSet) + + am1Cfg := config.DefaultAlertmanagerConfig + am1Cfg.Timeout = model.Duration(time.Second) + + m.alertmanagers["1"] = &alertmanagerSet{ + ams: []alertmanager{ + alertmanagerMock{ + urlf: func() string { return server.URL }, + }, + }, + cfg: &am1Cfg, + } + + notificationManagerStopped := make(chan struct{}) + + go func() { + defer close(notificationManagerStopped) + m.Run(nil) + }() + + // Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later. + m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")}) + + select { + case <-receiverReceivedRequest: + // Nothing more to do. + case <-time.After(time.Second): + require.FailNow(t, "gave up waiting for receiver to receive notification of first alert") + } + + m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")}) + + // Stop the notification manager and allow the receiver to proceed. + m.Stop() + close(releaseReceiver) + + // Wait for the notification manager to stop and confirm both notifications were sent. + select { + case <-notificationManagerStopped: + // Nothing more to do. + case <-time.After(200 * time.Millisecond): + require.FailNow(t, "gave up waiting for notification manager to stop") + } + + require.Equal(t, int64(2), alertsReceived.Load()) } diff --git a/prompb/codec.go b/prompb/codec.go new file mode 100644 index 0000000000..ad30cd5e7b --- /dev/null +++ b/prompb/codec.go @@ -0,0 +1,201 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prompb + +import ( + "strings" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" +) + +// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon. + +// ToLabels return model labels.Labels from timeseries' remote labels. +func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels { + return labelProtosToLabels(b, m.GetLabels()) +} + +// ToLabels return model labels.Labels from timeseries' remote labels. +func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels { + return labelProtosToLabels(b, m.GetLabels()) +} + +func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels { + b.Reset() + for _, l := range labelPairs { + b.Add(l.Name, l.Value) + } + b.Sort() + return b.Labels() +} + +// FromLabels transforms labels into prompb labels. The buffer slice +// will be used to avoid allocations if it is big enough to store the labels. 
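A hedged usage sketch of this pair of converters, `ToLabels` above and the `FromLabels` defined next, showing the intended buffer-reuse round trip (standalone, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	lbls := labels.FromStrings("__name__", "http_requests_total", "job", "api")

	// Encode into a reusable buffer; FromLabels appends into buf[:0],
	// so a large enough buffer avoids per-series allocations.
	buf := make([]prompb.Label, 0, 8)
	protoLbls := prompb.FromLabels(lbls, buf)

	// Decode back via a ScratchBuilder, as ToLabels does internally.
	b := labels.NewScratchBuilder(len(protoLbls))
	ts := prompb.TimeSeries{Labels: protoLbls}
	roundTripped := ts.ToLabels(&b, nil)

	fmt.Println(labels.Equal(lbls, roundTripped)) // true
}
```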
+func FromLabels(lbls labels.Labels, buf []Label) []Label { + result := buf[:0] + lbls.Range(func(l labels.Label) { + result = append(result, Label{ + Name: l.Name, + Value: l.Value, + }) + }) + return result +} + +// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. +func FromMetadataType(t model.MetricType) MetricMetadata_MetricType { + mt := strings.ToUpper(string(t)) + v, ok := MetricMetadata_MetricType_value[mt] + if !ok { + return MetricMetadata_UNKNOWN + } + return MetricMetadata_MetricType(v) +} + +// IsFloatHistogram returns true if the histogram is float. +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +// ToIntHistogram returns integer Prometheus histogram from the remote implementation +// of integer histogram. If it's a float histogram, the method returns nil. +func (h Histogram) ToIntHistogram() *histogram.Histogram { + if h.IsFloatHistogram() { + return nil + } + return &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountInt(), + Count: h.GetCountInt(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeDeltas(), + } +} + +// ToFloatHistogram returns float Prometheus histogram from the remote implementation +// of float histogram. If the underlying implementation is an integer histogram, a +// conversion is performed. +func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountFloat(), + Count: h.GetCountFloat(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeCounts(), + } + } + // Conversion from integer histogram. + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. 
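Before `FromIntHistogram` below, a worked illustration of the delta encoding that `deltasToCounts` above undoes: integer histograms transmit each bucket's count as a difference from the previous bucket, and the float conversion re-accumulates a running sum. A standalone sketch:

```go
package main

import "fmt"

// deltasToCounts mirrors the helper above: a running sum of the deltas
// recovers the absolute per-bucket counts as floats.
func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

func main() {
	// Bucket counts 2, 3, 3, 1 are encoded as deltas 2, +1, 0, -2.
	deltas := []int64{2, 1, 0, -2}
	fmt.Println(deltasToCounts(deltas)) // [2 3 3 1]
}
```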
+func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +// ToExemplar converts remote exemplar to model exemplar. +func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: labelProtosToLabels(b, m.GetLabels()), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/prompb/custom.go b/prompb/custom.go index 13d6e0f0cd..f73ddd446b 100644 --- a/prompb/custom.go +++ b/prompb/custom.go @@ -17,14 +17,6 @@ import ( "sync" ) -func (m Sample) T() int64 { return m.Timestamp } -func (m Sample) V() float64 { return m.Value } - -func (h Histogram) IsFloatHistogram() bool { - _, ok := h.GetCount().(*Histogram_CountFloat) - return ok -} - func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go new file mode 100644 index 0000000000..25fa0d4035 --- /dev/null +++ b/prompb/io/prometheus/write/v2/codec.go @@ -0,0 +1,216 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" +) + +// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon. + +// ToLabels return model labels.Labels from timeseries' remote labels. 
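Unlike its v1 counterpart earlier in this diff, the writev2 `ToLabels` defined next receives no inline strings: labels arrive as `LabelsRefs`, alternating name/value indices into the request's shared `Symbols` table. A hedged sketch of the decoding idea, kept independent of the generated types:

```go
package main

import "fmt"

// decodeRefs illustrates the writev2 symbolization scheme: refs come in
// (name, value) index pairs into a shared symbols table whose first entry
// must be the empty string.
func decodeRefs(refs []uint32, symbols []string) map[string]string {
	out := make(map[string]string, len(refs)/2)
	for i := 0; i+1 < len(refs); i += 2 {
		out[symbols[refs[i]]] = symbols[refs[i+1]]
	}
	return out
}

func main() {
	symbols := []string{"", "__name__", "up", "job", "api"}
	refs := []uint32{1, 2, 3, 4} // __name__="up", job="api"
	fmt.Println(decodeRefs(refs, symbols))
}
```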
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels { + return desymbolizeLabels(b, m.GetLabelsRefs(), symbols) +} + +// ToMetadata return model metadata from timeseries' remote metadata. +func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata { + typ := model.MetricTypeUnknown + switch m.Metadata.Type { + case Metadata_METRIC_TYPE_COUNTER: + typ = model.MetricTypeCounter + case Metadata_METRIC_TYPE_GAUGE: + typ = model.MetricTypeGauge + case Metadata_METRIC_TYPE_HISTOGRAM: + typ = model.MetricTypeHistogram + case Metadata_METRIC_TYPE_GAUGEHISTOGRAM: + typ = model.MetricTypeGaugeHistogram + case Metadata_METRIC_TYPE_SUMMARY: + typ = model.MetricTypeSummary + case Metadata_METRIC_TYPE_INFO: + typ = model.MetricTypeInfo + case Metadata_METRIC_TYPE_STATESET: + typ = model.MetricTypeStateset + } + return metadata.Metadata{ + Type: typ, + Unit: symbols[m.Metadata.UnitRef], + Help: symbols[m.Metadata.HelpRef], + } +} + +// FromMetadataType transforms a Prometheus metricType into writev2 metricType. +// Since the former is a string we need to transform it to an enum. +func FromMetadataType(t model.MetricType) Metadata_MetricType { + switch t { + case model.MetricTypeCounter: + return Metadata_METRIC_TYPE_COUNTER + case model.MetricTypeGauge: + return Metadata_METRIC_TYPE_GAUGE + case model.MetricTypeHistogram: + return Metadata_METRIC_TYPE_HISTOGRAM + case model.MetricTypeGaugeHistogram: + return Metadata_METRIC_TYPE_GAUGEHISTOGRAM + case model.MetricTypeSummary: + return Metadata_METRIC_TYPE_SUMMARY + case model.MetricTypeInfo: + return Metadata_METRIC_TYPE_INFO + case model.MetricTypeStateset: + return Metadata_METRIC_TYPE_STATESET + default: + return Metadata_METRIC_TYPE_UNSPECIFIED + } +} + +// IsFloatHistogram returns true if the histogram is float. +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +// ToIntHistogram returns integer Prometheus histogram from the remote implementation +// of integer histogram. If it's a float histogram, the method returns nil. +func (h Histogram) ToIntHistogram() *histogram.Histogram { + if h.IsFloatHistogram() { + return nil + } + return &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountInt(), + Count: h.GetCountInt(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeDeltas(), + CustomValues: h.GetCustomValues(), + } +} + +// ToFloatHistogram returns float Prometheus histogram from the remote implementation +// of float histogram. If the underlying implementation is an integer histogram, a +// conversion is performed. +func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountFloat(), + Count: h.GetCountFloat(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeCounts(), + CustomValues: h.GetCustomValues(), + } + } + // Conversion from integer histogram. 
+ return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + CustomValues: h.GetCustomValues(), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. +func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + CustomValues: h.CustomValues, + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + CustomValues: fh.CustomValues, + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: desymbolizeLabels(b, m.LabelsRefs, symbols), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/prompb/io/prometheus/write/v2/custom.go b/prompb/io/prometheus/write/v2/custom.go new file mode 100644 index 0000000000..3aa778eb60 --- /dev/null +++ b/prompb/io/prometheus/write/v2/custom.go @@ -0,0 +1,165 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "slices" +) + +func (m Sample) T() int64 { return m.Timestamp } +func (m Sample) V() float64 { return m.Value } + +func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) { + siz := m.Size() + if cap(dst) < siz { + dst = make([]byte, siz) + } + n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz]) + if err != nil { + return nil, err + } + return dst[:n], nil +} + +// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer, +// but calls OptimizedMarshalToSizedBuffer on the timeseries. +func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer, +// but marshals m.LabelsRefs in place without extra allocations. +func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + + if len(m.LabelsRefs) > 0 { + // This is the trick: encode the varints in reverse order to make it easier + // to do it in place. Then reverse the whole thing. 
+ var j10 int + start := i + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + i-- + j10++ + } + dAtA[i-1] = uint8(num) + i-- + j10++ + } + slices.Reverse(dAtA[i:start]) + // --- end of trick + + i = encodeVarintTypes(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} diff --git a/prompb/io/prometheus/write/v2/custom_test.go b/prompb/io/prometheus/write/v2/custom_test.go new file mode 100644 index 0000000000..139cbfb225 --- /dev/null +++ b/prompb/io/prometheus/write/v2/custom_test.go @@ -0,0 +1,97 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package writev2 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOptimizedMarshal(t *testing.T) { + for _, tt := range []struct { + name string + m *Request + }{ + { + name: "empty", + m: &Request{}, + }, + { + name: "simple", + m: &Request{ + Timeseries: []TimeSeries{ + { + LabelsRefs: []uint32{ + 0, 1, + 2, 3, + 4, 5, + 6, 7, + 8, 9, + 10, 11, + 12, 13, + 14, 15, + }, + + Samples: []Sample{{Value: 1, Timestamp: 0}}, + Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}}, + Histograms: nil, + }, + { + LabelsRefs: []uint32{ + 0, 1, + 2, 3, + 4, 5, + 6, 7, + 8, 9, + 10, 11, + 12, 13, + 14, 15, + }, + Samples: []Sample{{Value: 2, Timestamp: 1}}, + Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}}, + Histograms: nil, + }, + }, + Symbols: []string{ + "a", "b", + "c", "d", + "e", "f", + "g", "h", + "i", "j", + "k", "l", + "m", "n", + "o", "p", + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + // Keep the slice allocated to mimic what std Marshal + // would give to sized Marshal. + got := make([]byte, 0) + + // Should be the same as the standard marshal. + expected, err := tt.m.Marshal() + require.NoError(t, err) + got, err = tt.m.OptimizedMarshal(got) + require.NoError(t, err) + require.Equal(t, expected, got) + + // Unmarshal should work too. + m := &Request{} + require.NoError(t, m.Unmarshal(got)) + require.Equal(t, tt.m, m) + }) + } +} diff --git a/prompb/io/prometheus/write/v2/symbols.go b/prompb/io/prometheus/write/v2/symbols.go new file mode 100644 index 0000000000..f316a976f2 --- /dev/null +++ b/prompb/io/prometheus/write/v2/symbols.go @@ -0,0 +1,83 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
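Stepping back to the trick in `OptimizedMarshalToSizedBuffer` above: the sized-buffer marshalers fill the buffer back to front, but varints are naturally emitted least-significant group first, so the code writes each varint's bytes in reverse order and then reverses the whole packed region once. A hedged standalone sketch of the same idea:

```go
package main

import (
	"fmt"
	"slices"
)

// packReversed writes the varint encodings of nums into the tail of buf,
// back to front, then reverses the region so it reads as an ordinary
// forward-encoded packed field. It returns the new start index.
func packReversed(buf []byte, nums []uint32) int {
	i := len(buf)
	start := i
	for _, num := range nums {
		for num >= 1<<7 {
			i--
			buf[i] = byte(num&0x7f | 0x80) // low 7 bits, continuation bit set
			num >>= 7
		}
		i--
		buf[i] = byte(num) // final group, continuation bit clear
	}
	slices.Reverse(buf[i:start])
	return i
}

func main() {
	buf := make([]byte, 16)
	i := packReversed(buf, []uint32{1, 300})
	fmt.Printf("% x\n", buf[i:]) // 01 ac 02 — same bytes as forward encoding
}
```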
+ +package writev2 + +import "github.com/prometheus/prometheus/model/labels" + +// SymbolsTable implements table for easy symbol use. +type SymbolsTable struct { + strings []string + symbolsMap map[string]uint32 +} + +// NewSymbolTable returns a symbol table. +func NewSymbolTable() SymbolsTable { + return SymbolsTable{ + // Empty string is required as a first element. + symbolsMap: map[string]uint32{"": 0}, + strings: []string{""}, + } +} + +// Symbolize adds (if not added before) a string to the symbols table, +// while returning its reference number. +func (t *SymbolsTable) Symbolize(str string) uint32 { + if ref, ok := t.symbolsMap[str]; ok { + return ref + } + ref := uint32(len(t.strings)) + t.strings = append(t.strings, str) + t.symbolsMap[str] = ref + return ref +} + +// SymbolizeLabels symbolize Prometheus labels. +func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 { + result := buf[:0] + lbls.Range(func(l labels.Label) { + off := t.Symbolize(l.Name) + result = append(result, off) + off = t.Symbolize(l.Value) + result = append(result, off) + }) + return result +} + +// Symbols returns computes symbols table to put in e.g. Request.Symbols. +// As per spec, order does not matter. +func (t *SymbolsTable) Symbols() []string { + return t.strings +} + +// Reset clears symbols table. +func (t *SymbolsTable) Reset() { + // NOTE: Make sure to keep empty symbol. + t.strings = t.strings[:1] + for k := range t.symbolsMap { + if k == "" { + continue + } + delete(t.symbolsMap, k) + } +} + +// desymbolizeLabels decodes label references, with given symbols to labels. +func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels { + b.Reset() + for i := 0; i < len(labelRefs); i += 2 { + b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]]) + } + b.Sort() + return b.Labels() +} diff --git a/prompb/io/prometheus/write/v2/symbols_test.go b/prompb/io/prometheus/write/v2/symbols_test.go new file mode 100644 index 0000000000..3d852e88f1 --- /dev/null +++ b/prompb/io/prometheus/write/v2/symbols_test.go @@ -0,0 +1,60 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
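A usage sketch for the `SymbolsTable` defined above, showing the intended encode path for a writev2 request (field names taken from the generated types later in this diff; timestamp value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
	st := writev2.NewSymbolTable()

	lbls := labels.FromStrings("__name__", "up", "job", "api")
	refs := st.SymbolizeLabels(lbls, nil) // alternating (name, value) indices

	req := &writev2.Request{
		Symbols: st.Symbols(), // index 0 is always the empty string
		Timeseries: []writev2.TimeSeries{{
			LabelsRefs: refs,
			Samples:    []writev2.Sample{{Value: 1, Timestamp: 1717000000000}},
		}},
	}
	fmt.Println(req.Symbols, refs)
}
```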
+ +package writev2 + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" +) + +func TestSymbolsTable(t *testing.T) { + s := NewSymbolTable() + require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist") + require.Equal(t, uint32(0), s.Symbolize("")) + require.Equal(t, []string{""}, s.Symbols()) + + require.Equal(t, uint32(1), s.Symbolize("abc")) + require.Equal(t, []string{"", "abc"}, s.Symbols()) + + require.Equal(t, uint32(2), s.Symbolize("__name__")) + require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols()) + + require.Equal(t, uint32(3), s.Symbolize("foo")) + require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols()) + + s.Reset() + require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist") + require.Equal(t, uint32(0), s.Symbolize("")) + + require.Equal(t, uint32(1), s.Symbolize("__name__")) + require.Equal(t, []string{"", "__name__"}, s.Symbols()) + + require.Equal(t, uint32(2), s.Symbolize("abc")) + require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols()) + + ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234") + encoded := s.SymbolizeLabels(ls, nil) + require.Equal(t, []uint32{1, 3, 4, 5}, encoded) + b := labels.NewScratchBuilder(len(encoded)) + decoded := desymbolizeLabels(&b, encoded, s.Symbols()) + require.Equal(t, ls, decoded) + + // Different buf. + ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234") + encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5}) + require.Equal(t, []uint32{1, 3, 6, 5}, encoded) +} diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go new file mode 100644 index 0000000000..d6ea8398f7 --- /dev/null +++ b/prompb/io/prometheus/write/v2/types.pb.go @@ -0,0 +1,3241 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: io/prometheus/write/v2/types.proto + +package writev2 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Metadata_MetricType int32 + +const ( + Metadata_METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0 + Metadata_METRIC_TYPE_COUNTER Metadata_MetricType = 1 + Metadata_METRIC_TYPE_GAUGE Metadata_MetricType = 2 + Metadata_METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3 + Metadata_METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4 + Metadata_METRIC_TYPE_SUMMARY Metadata_MetricType = 5 + Metadata_METRIC_TYPE_INFO Metadata_MetricType = 6 + Metadata_METRIC_TYPE_STATESET Metadata_MetricType = 7 +) + +var Metadata_MetricType_name = map[int32]string{ + 0: "METRIC_TYPE_UNSPECIFIED", + 1: "METRIC_TYPE_COUNTER", + 2: "METRIC_TYPE_GAUGE", + 3: "METRIC_TYPE_HISTOGRAM", + 4: "METRIC_TYPE_GAUGEHISTOGRAM", + 5: "METRIC_TYPE_SUMMARY", + 6: "METRIC_TYPE_INFO", + 7: "METRIC_TYPE_STATESET", +} + +var Metadata_MetricType_value = map[string]int32{ + "METRIC_TYPE_UNSPECIFIED": 0, + "METRIC_TYPE_COUNTER": 1, + "METRIC_TYPE_GAUGE": 2, + "METRIC_TYPE_HISTOGRAM": 3, + "METRIC_TYPE_GAUGEHISTOGRAM": 4, + "METRIC_TYPE_SUMMARY": 5, + "METRIC_TYPE_INFO": 6, + "METRIC_TYPE_STATESET": 7, +} + +func (x Metadata_MetricType) String() string { + return proto.EnumName(Metadata_MetricType_name, int32(x)) +} + +func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{4, 0} +} + +type Histogram_ResetHint int32 + +const ( + Histogram_RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0 + Histogram_RESET_HINT_YES Histogram_ResetHint = 1 + Histogram_RESET_HINT_NO Histogram_ResetHint = 2 + Histogram_RESET_HINT_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "RESET_HINT_UNSPECIFIED", + 1: "RESET_HINT_YES", + 2: "RESET_HINT_NO", + 3: "RESET_HINT_GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "RESET_HINT_UNSPECIFIED": 0, + "RESET_HINT_YES": 1, + "RESET_HINT_NO": 2, + "RESET_HINT_GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{5, 0} +} + +// Request represents a request to write the given timeseries to a remote destination. +// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// NOTE: gogoproto options might change in future for this file, they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908 +type Request struct { + // symbols contains a de-duplicated array of string elements used for various + // items in a Request message, like labels and metadata items. For the sender's convenience + // around empty values for optional fields like unit_ref, symbols array MUST start with + // empty string. + // + // To decode each of the symbolized strings, referenced, by "ref(s)" suffix, you + // need to lookup the actual string by index from symbols array. The order of + // strings is up to the sender. The receiver should not assume any particular encoding. + Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"` + // timeseries represents an array of distinct series with 0 or more samples. 
+ Timeseries []TimeSeries `protobuf:"bytes,5,rep,name=timeseries,proto3" json:"timeseries"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetSymbols() []string { + if m != nil { + return m.Symbols + } + return nil +} + +func (m *Request) GetTimeseries() []TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +// TimeSeries represents a single series. +type TimeSeries struct { + // labels_refs is a list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // + // Note that there might be multiple TimeSeries objects in the same + // Requests with the same labels e.g. for different exemplars, metadata + // or created timestamp. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"` + // exemplars represents an optional set of exemplars attached to this series' samples. + Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"` + // metadata represents the metadata associated with the given series' samples. + Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + // + // Note that some receivers might require this and in return fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. 
+ // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{1} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *TimeSeries) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + +func (m *TimeSeries) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *TimeSeries) GetMetadata() Metadata { + if m != nil { + return m.Metadata + } + return Metadata{} +} + +func (m *TimeSeries) GetCreatedTimestamp() int64 { + if m != nil { + return m.CreatedTimestamp + } + return 0 +} + +// Exemplar is an additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +type Exemplar struct { + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's len is always + // a multiple of 2, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents an optional timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. 
If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{2} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// Sample represents series sample. +type Sample struct { + // value of the sample. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{3} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// Metadata represents the metadata associated with the given series' samples. 
+type Metadata struct { + Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"` + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional, reference should point to an empty string in + // such a case. + HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"` + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional, reference should point to an empty string in + // such a case. + UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{4} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return m.Size() +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetType() Metadata_MetricType { + if m != nil { + return m.Type + } + return Metadata_METRIC_TYPE_UNSPECIFIED +} + +func (m *Metadata) GetHelpRef() uint32 { + if m != nil { + return m.HelpRef + } + return 0 +} + +func (m *Metadata) GetUnitRef() uint32 { + if m != nil { + return m.UnitRef + } + return 0 +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be + // added in future for new bucketing layouts. + // + // The schema equal to -53 means custom buckets. See + // custom_values field description for more details. + // + // Values between -4 and 8 represent base-2 bucket schema, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n (n is schema value) logarithmic buckets. Or in other words, + // each bucket boundary is the previous boundary times 2^(2^-n). 
+ Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + // + // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows: + // * The span offset+length points to an the index of the custom_values array + // or +Inf if pointing to the len of the array. + // * The counts and deltas have the same meaning as for exponential histograms. + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=io.prometheus.write.v2.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // custom_values is an additional field used by non-exponential bucketing layouts. + // + // For custom buckets (-53 schema value) custom_values specify monotonically + // increasing upper inclusive boundaries for the bucket counts with arbitrary + // widths for this histogram. In other words, custom_values represents custom, + // explicit bucketing that could have been converted from the classic histograms. + // + // Those bounds are then referenced by spans in positive_spans with corresponding positive + // counts of deltas (refer to positive_spans for more details). This way we can + // have encode sparse histograms with custom bucketing (many buckets are often + // not used). + // + // Note that for custom bounds, even negative observations are placed in the positive + // counts to simplify the implementation and avoid ambiguity of where to place + // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and + // the zero bucket are unused, if the schema indicates custom bucketing. + // + // For each upper boundary the previous boundary represent the lower exclusive + // boundary for that bucket. 
The first element is the upper inclusive boundary
+	// for the first bucket, which implicitly has a lower inclusive bound of -Inf.
+	// This is similar to "le" label semantics on classic histograms. You may add a
+	// bucket with an upper bound of 0 to make sure that you really have no negative
+	// observations, but in practice, native histogram rendering treats a histogram
+	// with a first upper boundary of 0 and no negative counts the same as one
+	// without that boundary.
+	//
+	// The last element is not only the upper inclusive bound of the last regular
+	// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
+	CustomValues         []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Histogram) Reset()         { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage()    {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f139519efd9fa8d7, []int{5}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Histogram) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Histogram.Merge(m, src)
+}
+func (m *Histogram) XXX_Size() int {
+	return m.Size()
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+	xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
+
+type isHistogram_Count interface {
+	isHistogram_Count()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+type isHistogram_ZeroCount interface {
+	isHistogram_ZeroCount()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type Histogram_CountInt struct {
+	CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"`
+}
+type Histogram_CountFloat struct {
+	CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"`
+}
+type Histogram_ZeroCountInt struct {
+	ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"`
+}
+type Histogram_ZeroCountFloat struct {
+	ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"`
+}
+
+func (*Histogram_CountInt) isHistogram_Count()           {}
+func (*Histogram_CountFloat) isHistogram_Count()         {}
+func (*Histogram_ZeroCountInt) isHistogram_ZeroCount()   {}
+func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
+
+func (m *Histogram) GetCount() isHistogram_Count {
+	if m != nil {
+		return m.Count
+	}
+	return nil
+}
+func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
+	if m != nil {
+		return m.ZeroCount
+	}
+	return nil
+}
+
+func (m *Histogram) GetCountInt() uint64 {
+	if x, ok := m.GetCount().(*Histogram_CountInt); ok {
+		return x.CountInt
+	}
+	return 0
+}
+
+func (m *Histogram) GetCountFloat() float64 {
+	if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
+		return x.CountFloat
+	}
+	return 0
+}
+
+func (m *Histogram) GetSum() float64 {
+	if m != nil {
+		return m.Sum
+	}
+	return 0
+}
+
+func (m *Histogram) GetSchema() int32 {
+	if m != nil {
+		return m.Schema
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroThreshold() float64 {
+	if m != nil {
+		return m.ZeroThreshold
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCountInt() uint64 {
+	if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
+		return x.ZeroCountInt
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCountFloat() float64 {
+	if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
+		return x.ZeroCountFloat
+	}
+	return 0
+}
+
+func (m *Histogram) GetNegativeSpans() []BucketSpan {
+	if m != nil {
+		return m.NegativeSpans
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeDeltas() []int64 {
+	if m != nil {
+		return m.NegativeDeltas
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeCounts() []float64 {
+	if m != nil {
+		return m.NegativeCounts
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveSpans() []BucketSpan {
+	if m != nil {
+		return m.PositiveSpans
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveDeltas() []int64 {
+	if m != nil {
+		return m.PositiveDeltas
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveCounts() []float64 {
+	if m != nil {
+		return m.PositiveCounts
+	}
+	return nil
+}
+
+func (m *Histogram) GetResetHint() Histogram_ResetHint {
+	if m != nil {
+		return m.ResetHint
+	}
+	return Histogram_RESET_HINT_UNSPECIFIED
+}
+
+func (m *Histogram) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+func (m *Histogram) GetCustomValues() []float64 {
+	if m != nil {
+		return m.CustomValues
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Histogram) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Histogram_CountInt)(nil),
+		(*Histogram_CountFloat)(nil),
+		(*Histogram_ZeroCountInt)(nil),
+		(*Histogram_ZeroCountFloat)(nil),
+	}
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
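Editor's note: the sketch below is not part of the patch. It illustrates the span/delta layout described in the comment above, assuming the generated package is imported as writev2; expandBuckets is a hypothetical helper, not part of the generated API.

// expandBuckets turns gap-compressed spans plus delta-encoded counts into
// absolute counts keyed by bucket index. Each span's Offset is relative to
// the end of the previous span; each delta is relative to the previous
// bucket's count (the first delta is an absolute count).
package main

import (
	"fmt"

	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func expandBuckets(spans []writev2.BucketSpan, deltas []int64) map[int32]int64 {
	buckets := map[int32]int64{}
	var (
		idx   int32 // current absolute bucket index
		count int64 // running absolute bucket count
		di    int   // position in the deltas slice
	)
	for _, s := range spans {
		idx += s.Offset
		for i := uint32(0); i < s.Length; i++ {
			count += deltas[di]
			buckets[idx] = count
			idx++
			di++
		}
	}
	return buckets
}

func main() {
	// Two spans: buckets 0-1, then (after a gap of 3) buckets 5-6.
	spans := []writev2.BucketSpan{{Offset: 0, Length: 2}, {Offset: 3, Length: 2}}
	deltas := []int64{4, -2, 1, 0} // absolute counts: 4, 2, 3, 3
	fmt.Println(expandBuckets(spans, deltas))
	// map[0:4 1:2 5:3 6:3]
}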
+type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_f139519efd9fa8d7, []int{6} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func init() { + proto.RegisterEnum("io.prometheus.write.v2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value) + proto.RegisterEnum("io.prometheus.write.v2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) + proto.RegisterType((*Request)(nil), "io.prometheus.write.v2.Request") + proto.RegisterType((*TimeSeries)(nil), "io.prometheus.write.v2.TimeSeries") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.write.v2.Exemplar") + proto.RegisterType((*Sample)(nil), "io.prometheus.write.v2.Sample") + proto.RegisterType((*Metadata)(nil), "io.prometheus.write.v2.Metadata") + proto.RegisterType((*Histogram)(nil), "io.prometheus.write.v2.Histogram") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.write.v2.BucketSpan") +} + +func init() { + proto.RegisterFile("io/prometheus/write/v2/types.proto", fileDescriptor_f139519efd9fa8d7) +} + +var fileDescriptor_f139519efd9fa8d7 = []byte{ + // 926 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xed, 0xc4, 0x69, 0x3e, 0x6e, 0x9a, 0xac, 0x33, 0xb4, 0x5d, 0x6f, 0x81, 0x6c, 0xd6, 0x08, + 0x88, 0x58, 0x29, 0x91, 0xc2, 0xeb, 0x0a, 0xd4, 0xb4, 0x6e, 0x93, 0x95, 0x92, 0xac, 0x26, 0x2e, + 0x52, 0x79, 0xb1, 0xdc, 0x64, 0x92, 0x58, 0xd8, 0xb1, 0xf1, 0x4c, 0x02, 0xe5, 0xf7, 0xf1, 0xb0, + 0x8f, 0xfc, 0x01, 0x10, 0xf4, 0x9d, 0xff, 0x80, 0x66, 0xfc, 0xd9, 0x42, 0xbb, 0xe2, 0x6d, 0xe6, + 0xdc, 0x73, 0xee, 0x3d, 0xb9, 0xbe, 0x77, 0x02, 0xba, 0xe3, 0xf7, 0x82, 0xd0, 0xf7, 0x28, 0x5f, + 0xd3, 0x2d, 0xeb, 0xfd, 0x14, 0x3a, 0x9c, 0xf6, 0x76, 0xfd, 0x1e, 0xbf, 0x0d, 0x28, 0xeb, 0x06, + 0xa1, 0xcf, 0x7d, 0x7c, 0xec, 0xf8, 0xdd, 0x8c, 0xd3, 0x95, 0x9c, 0xee, 0xae, 0x7f, 0x72, 0xb8, + 0xf2, 0x57, 0xbe, 0xa4, 0xf4, 0xc4, 0x29, 0x62, 0xeb, 0x0c, 0xca, 0x84, 0xfe, 0xb8, 0xa5, 0x8c, + 0x63, 0x0d, 0xca, 0xec, 0xd6, 0xbb, 0xf1, 0x5d, 0xa6, 0x15, 0xdb, 0x4a, 0xa7, 0x4a, 0x92, 0x2b, + 0x1e, 0x02, 0x70, 0xc7, 0xa3, 0x8c, 0x86, 0x0e, 0x65, 0xda, 0x7e, 0x5b, 0xe9, 0xd4, 0xfa, 0x7a, + 0xf7, 
0xbf, 0xeb, 0x74, 0x4d, 0xc7, 0xa3, 0x33, 0xc9, 0x1c, 0x14, 0xdf, 0xff, 0xf1, 0x72, 0x8f, + 0xe4, 0xb4, 0x6f, 0x8b, 0x15, 0xa4, 0x16, 0xf5, 0xbf, 0x0b, 0x00, 0x19, 0x0d, 0xbf, 0x84, 0x9a, + 0x6b, 0xdf, 0x50, 0x97, 0x59, 0x21, 0x5d, 0x32, 0x0d, 0xb5, 0x95, 0x4e, 0x9d, 0x40, 0x04, 0x11, + 0xba, 0x64, 0xf8, 0x1b, 0x28, 0x33, 0xdb, 0x0b, 0x5c, 0xca, 0xb4, 0x82, 0x2c, 0xde, 0x7a, 0xac, + 0xf8, 0x4c, 0xd2, 0xe2, 0xc2, 0x89, 0x08, 0x5f, 0x02, 0xac, 0x1d, 0xc6, 0xfd, 0x55, 0x68, 0x7b, + 0x4c, 0x53, 0x64, 0x8a, 0x57, 0x8f, 0xa5, 0x18, 0x26, 0xcc, 0xc4, 0x7e, 0x26, 0xc5, 0xe7, 0x50, + 0xa5, 0x3f, 0x53, 0x2f, 0x70, 0xed, 0x30, 0x6a, 0x52, 0xad, 0xdf, 0x7e, 0x2c, 0x8f, 0x11, 0x13, + 0xe3, 0x34, 0x99, 0x10, 0x0f, 0xa0, 0xe2, 0x51, 0x6e, 0x2f, 0x6c, 0x6e, 0x6b, 0xfb, 0x6d, 0xf4, + 0x54, 0x92, 0x71, 0xcc, 0x8b, 0x93, 0xa4, 0x3a, 0xfc, 0x1a, 0x9a, 0xf3, 0x90, 0xda, 0x9c, 0x2e, + 0x2c, 0xd9, 0x5e, 0x6e, 0x7b, 0x81, 0x56, 0x6a, 0xa3, 0x8e, 0x42, 0xd4, 0x38, 0x60, 0x26, 0xb8, + 0x6e, 0x41, 0x25, 0x71, 0xf3, 0xe1, 0x66, 0x1f, 0xc2, 0xfe, 0xce, 0x76, 0xb7, 0x54, 0x2b, 0xb4, + 0x51, 0x07, 0x91, 0xe8, 0x82, 0x3f, 0x81, 0x6a, 0x56, 0x47, 0x91, 0x75, 0x32, 0x40, 0x7f, 0x03, + 0xa5, 0xa8, 0xf3, 0x99, 0x1a, 0x3d, 0xaa, 0x2e, 0x3c, 0x54, 0xff, 0x55, 0x80, 0x4a, 0xf2, 0x43, + 0xf1, 0xb7, 0x50, 0x14, 0xd3, 0x2c, 0xf5, 0x8d, 0xfe, 0xeb, 0x0f, 0x35, 0x46, 0x1c, 0x42, 0x67, + 0x6e, 0xde, 0x06, 0x94, 0x48, 0x21, 0x7e, 0x01, 0x95, 0x35, 0x75, 0x03, 0xf1, 0xf3, 0xa4, 0xd1, + 0x3a, 0x29, 0x8b, 0x3b, 0xa1, 0x4b, 0x11, 0xda, 0x6e, 0x1c, 0x2e, 0x43, 0xc5, 0x28, 0x24, 0xee, + 0x84, 0x2e, 0xf5, 0xdf, 0x11, 0x40, 0x96, 0x0a, 0x7f, 0x0c, 0xcf, 0xc7, 0x86, 0x49, 0x46, 0x67, + 0x96, 0x79, 0xfd, 0xce, 0xb0, 0xae, 0x26, 0xb3, 0x77, 0xc6, 0xd9, 0xe8, 0x62, 0x64, 0x9c, 0xab, + 0x7b, 0xf8, 0x39, 0x7c, 0x94, 0x0f, 0x9e, 0x4d, 0xaf, 0x26, 0xa6, 0x41, 0x54, 0x84, 0x8f, 0xa0, + 0x99, 0x0f, 0x5c, 0x9e, 0x5e, 0x5d, 0x1a, 0x6a, 0x01, 0xbf, 0x80, 0xa3, 0x3c, 0x3c, 0x1c, 0xcd, + 0xcc, 0xe9, 0x25, 0x39, 0x1d, 0xab, 0x0a, 0x6e, 0xc1, 0xc9, 0xbf, 0x14, 0x59, 0xbc, 0xf8, 0xb0, + 0xd4, 0xec, 0x6a, 0x3c, 0x3e, 0x25, 0xd7, 0xea, 0x3e, 0x3e, 0x04, 0x35, 0x1f, 0x18, 0x4d, 0x2e, + 0xa6, 0x6a, 0x09, 0x6b, 0x70, 0x78, 0x8f, 0x6e, 0x9e, 0x9a, 0xc6, 0xcc, 0x30, 0xd5, 0xb2, 0xfe, + 0x6b, 0x09, 0xaa, 0xe9, 0x64, 0xe3, 0x4f, 0xa1, 0x3a, 0xf7, 0xb7, 0x1b, 0x6e, 0x39, 0x1b, 0x2e, + 0x3b, 0x5d, 0x1c, 0xee, 0x91, 0x8a, 0x84, 0x46, 0x1b, 0x8e, 0x5f, 0x41, 0x2d, 0x0a, 0x2f, 0x5d, + 0xdf, 0xe6, 0xd1, 0x20, 0x0c, 0xf7, 0x08, 0x48, 0xf0, 0x42, 0x60, 0x58, 0x05, 0x85, 0x6d, 0x3d, + 0xd9, 0x60, 0x44, 0xc4, 0x11, 0x1f, 0x43, 0x89, 0xcd, 0xd7, 0xd4, 0xb3, 0x65, 0x6b, 0x9b, 0x24, + 0xbe, 0xe1, 0xcf, 0xa1, 0xf1, 0x0b, 0x0d, 0x7d, 0x8b, 0xaf, 0x43, 0xca, 0xd6, 0xbe, 0xbb, 0x90, + 0x33, 0x8f, 0x48, 0x5d, 0xa0, 0x66, 0x02, 0xe2, 0x2f, 0x62, 0x5a, 0xe6, 0xab, 0x24, 0x7d, 0x21, + 0x72, 0x20, 0xf0, 0xb3, 0xc4, 0xdb, 0x57, 0xa0, 0xe6, 0x78, 0x91, 0xc1, 0xb2, 0x34, 0x88, 0x48, + 0x23, 0x65, 0x46, 0x26, 0xa7, 0xd0, 0xd8, 0xd0, 0x95, 0xcd, 0x9d, 0x1d, 0xb5, 0x58, 0x60, 0x6f, + 0x98, 0x56, 0x79, 0xfa, 0xed, 0x1a, 0x6c, 0xe7, 0x3f, 0x50, 0x3e, 0x0b, 0xec, 0x4d, 0xbc, 0x70, + 0xf5, 0x44, 0x2f, 0x30, 0x86, 0xbf, 0x84, 0x67, 0x69, 0xc2, 0x05, 0x75, 0xb9, 0xcd, 0xb4, 0x6a, + 0x5b, 0xe9, 0x60, 0x92, 0xd6, 0x39, 0x97, 0xe8, 0x3d, 0xa2, 0x74, 0xca, 0x34, 0x68, 0x2b, 0x1d, + 0x94, 0x11, 0xa5, 0x4d, 0x26, 0x2c, 0x06, 0x3e, 0x73, 0x72, 0x16, 0x6b, 0xff, 0xd7, 0x62, 0xa2, + 0x4f, 0x2d, 0xa6, 0x09, 0x63, 0x8b, 0x07, 0x91, 0xc5, 0x04, 0xce, 0x2c, 0xa6, 0xc4, 0xd8, 0x62, + 0x3d, 0xb2, 0x98, 0xc0, 0xb1, 
0xc5, 0xb7, 0x00, 0x21, 0x65, 0x94, 0x5b, 0x6b, 0xf1, 0x55, 0x1a, + 0x4f, 0xef, 0x65, 0x3a, 0x63, 0x5d, 0x22, 0x34, 0x43, 0x67, 0xc3, 0x49, 0x35, 0x4c, 0x8e, 0xf7, + 0x1f, 0x82, 0x67, 0x0f, 0x1e, 0x02, 0xfc, 0x19, 0xd4, 0xe7, 0x5b, 0xc6, 0x7d, 0xcf, 0x92, 0xcf, + 0x06, 0xd3, 0x54, 0x69, 0xe8, 0x20, 0x02, 0xbf, 0x93, 0x98, 0xbe, 0x80, 0x6a, 0x9a, 0x1a, 0x9f, + 0xc0, 0x31, 0x11, 0x13, 0x6e, 0x0d, 0x47, 0x13, 0xf3, 0xc1, 0x9a, 0x62, 0x68, 0xe4, 0x62, 0xd7, + 0xc6, 0x4c, 0x45, 0xb8, 0x09, 0xf5, 0x1c, 0x36, 0x99, 0xaa, 0x05, 0xb1, 0x49, 0x39, 0x28, 0xda, + 0x59, 0x65, 0x50, 0x86, 0x7d, 0xd9, 0x94, 0xc1, 0x01, 0x40, 0x36, 0x6f, 0xfa, 0x1b, 0x80, 0xec, + 0x03, 0x88, 0x91, 0xf7, 0x97, 0x4b, 0x46, 0xa3, 0x1d, 0x6a, 0x92, 0xf8, 0x26, 0x70, 0x97, 0x6e, + 0x56, 0x7c, 0x2d, 0x57, 0xa7, 0x4e, 0xe2, 0xdb, 0xe0, 0xe8, 0xfd, 0x5d, 0x0b, 0xfd, 0x76, 0xd7, + 0x42, 0x7f, 0xde, 0xb5, 0xd0, 0xf7, 0x65, 0xd9, 0xb4, 0x5d, 0xff, 0xa6, 0x24, 0xff, 0x8a, 0xbf, + 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xfc, 0x93, 0x1c, 0xde, 0x07, 0x00, 0x00, +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.LabelsRefs) > 0 { + dAtA3 := make([]byte, len(m.LabelsRefs)*10) + var j2 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA3[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.LabelsRefs) > 0 { + dAtA5 := make([]byte, len(m.LabelsRefs)*10) + var j4 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintTypes(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.UnitRef != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.UnitRef)) + i-- + dAtA[i] = 0x20 + } + if m.HelpRef != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.HelpRef)) + 
i-- + dAtA[i] = 0x18 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f7 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f7)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j8 int + dAtA10 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x9 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x9 >= 1<<7 { + dAtA10[j8] = uint8(uint64(x9)&0x7f | 0x80) + j8++ + x9 >>= 7 + } + dAtA10[j8] = uint8(x9) + j8++ + } + i -= j8 + copy(dAtA[i:], dAtA10[:j8]) + i = encodeVarintTypes(dAtA, i, uint64(j8)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f11 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j12 int + dAtA14 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x13 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x13 >= 1<<7 { + dAtA14[j12] = uint8(uint64(x13)&0x7f | 0x80) + j12++ + x13 >>= 7 + } + dAtA14[j12] = uint8(x13) + j12++ + } + i -= j12 + copy(dAtA[i:], dAtA14[:j12]) + i = encodeVarintTypes(dAtA, i, uint64(j12)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if 
m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Symbols) > 0 { + for _, s := range m.Symbols { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + 
n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.Metadata.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.CreatedTimestamp != 0 { + n += 1 + sovTypes(uint64(m.CreatedTimestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.HelpRef != 0 { + n += 1 + sovTypes(uint64(m.HelpRef)) + } + if m.UnitRef != 0 { + n += 1 + sovTypes(uint64(m.UnitRef)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if len(m.CustomValues) > 0 { + n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = 
append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Metadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) + } + m.HelpRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HelpRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType) + } + m.UnitRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.UnitRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, 
elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +)
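The generated `Unmarshal` and `skipTypes` functions above repeat one inlined pattern throughout: protobuf's base-128 varint, which stores 7 payload bits per byte, least-significant group first, with the high bit as a continuation flag. A minimal standalone sketch of that pattern for illustration; `decodeVarint` is a hypothetical helper, not part of the generated file:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a base-128 varint from b and returns the decoded
// value and the number of bytes consumed. It mirrors the inlined loop
// used by the generated code, including its overflow and EOF checks.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows a 64-bit integer")
		}
		if n >= len(b) {
			return 0, 0, errors.New("unexpected end of buffer")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift // 7 payload bits per byte.
		if c < 0x80 {                // High bit clear: last byte.
			return v, n, nil
		}
	}
}

func main() {
	// 300 = 0b100101100 encodes as 0xAC 0x02.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```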
diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto new file mode 100644 index 0000000000..0cc7b8bc4a --- /dev/null +++ b/prompb/io/prometheus/write/v2/types.proto @@ -0,0 +1,260 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2 + +syntax = "proto3"; +package io.prometheus.write.v2; + +option go_package = "writev2"; + +import "gogoproto/gogo.proto"; + +// Request represents a request to write the given timeseries to a remote destination. +// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// NOTE: gogoproto options might change in future for this file, they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908 +message Request { + // Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the top-down message + // for the deterministic interop between those two, see types_test.go for details. + // Generally it's not needed, because Receivers must use the Content-Type header, but we want to + // be sympathetic to adopters with mistaken implementations and produce a deterministic error (an empty + // message if you use the wrong proto schema). + reserved 1 to 3; + + // symbols contains a de-duplicated array of string elements used for various + // items in a Request message, like labels and metadata items. For the sender's convenience + // around empty values for optional fields like unit_ref, the symbols array MUST start with + // an empty string. + // + // To decode each of the symbolized strings, referenced by the "ref(s)" suffix, you + // need to look up the actual string by index in the symbols array. The order of + // strings is up to the sender. The receiver should not assume any particular encoding. + repeated string symbols = 4; + // timeseries represents an array of distinct series with 0 or more samples. + repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false]; +} + +// TimeSeries represents a single series. +message TimeSeries { + // labels_refs is a list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // + // Note that there might be multiple TimeSeries objects in the same + // Request with the same labels, e.g. for different exemplars, metadata + // or created timestamp. + repeated uint32 labels_refs = 1; + + // TimeSeries messages can specify either samples or (native) histogram samples + // (histograms field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 3 [(gogoproto.nullable) = false]; + + // exemplars represents an optional set of exemplars attached to this series' samples. + repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false]; + + // metadata represents the metadata associated with the given series' samples. + Metadata metadata = 5 [(gogoproto.nullable) = false]; + + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + + // Note that some receivers might require this and, when it is missing, fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + int64 created_timestamp = 6; +}
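To make the symbols indirection concrete, here is a small illustrative sketch (the helper and variable names are assumptions, not part of this PR) that resolves a labels_refs list against a Request.symbols array as described above:

```go
package main

import "fmt"

// labelsFromRefs resolves name/value reference pairs against the
// de-duplicated symbols array; symbols[0] is the reserved empty string.
func labelsFromRefs(symbols []string, refs []uint32) (map[string]string, error) {
	if len(refs)%2 != 0 {
		return nil, fmt.Errorf("labels_refs length %d is not a multiple of two", len(refs))
	}
	lbls := make(map[string]string, len(refs)/2)
	for i := 0; i < len(refs); i += 2 {
		name, value := refs[i], refs[i+1]
		if int(name) >= len(symbols) || int(value) >= len(symbols) {
			return nil, fmt.Errorf("symbol reference out of range")
		}
		lbls[symbols[name]] = symbols[value]
	}
	return lbls, nil
}

func main() {
	symbols := []string{"", "__name__", "metric1", "job", "api"}
	lbls, err := labelsFromRefs(symbols, []uint32{1, 2, 3, 4})
	fmt.Println(lbls, err) // map[__name__:metric1 job:api] <nil>
}
```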
+ +// Exemplar is additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +message Exemplar { + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. + repeated uint32 labels_refs = 1; + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + double value = 2; + // timestamp represents an optional timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + int64 timestamp = 3; +} + +// Sample represents a series sample. +message Sample { + // value of the sample. + double value = 1; + // timestamp represents the timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + int64 timestamp = 2; +} + +// Metadata represents the metadata associated with the given series' samples. +message Metadata { + enum MetricType { + METRIC_TYPE_UNSPECIFIED = 0; + METRIC_TYPE_COUNTER = 1; + METRIC_TYPE_GAUGE = 2; + METRIC_TYPE_HISTOGRAM = 3; + METRIC_TYPE_GAUGEHISTOGRAM = 4; + METRIC_TYPE_SUMMARY = 5; + METRIC_TYPE_INFO = 6; + METRIC_TYPE_STATESET = 7; + } + MetricType type = 1; + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional; the reference should point to an empty string in + // such a case. + uint32 help_ref = 3; + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional; the reference should point to an empty string in + // such a case. + uint32 unit_ref = 4; +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both the usual +// integer histogram and a float histogram. +message Histogram { + enum ResetHint { + RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly. + RESET_HINT_YES = 1; // This is the 1st histogram after a counter reset. + RESET_HINT_NO = 2; // There was no counter reset between this and the previous Histogram. + RESET_HINT_GAUGE = 3; // This is a gauge histogram where counter resets don't happen. + } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + + // The schema defines the bucket layout. Currently, valid numbers + // are -53 and numbers in the range of -4 <= n <= 8. More valid numbers might be + // added in future for new bucketing layouts. + // + // The schema equal to -53 means custom buckets. See the + // custom_values field description for more details. + // + // Values between -4 and 8 represent base-2 bucket schema, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n (n is schema value) logarithmic buckets. Or in other words, + // each bucket boundary is the previous boundary times 2^(2^-n). + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + // + // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows: + // * The span offset+length points to the index of the custom_values array + // or +Inf if pointing to the length of the array. + // * The counts and deltas have the same meaning as for exponential histograms. + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp represents the timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + int64 timestamp = 15; + + // custom_values is an additional field used by non-exponential bucketing layouts. + // + // For custom buckets (-53 schema value) custom_values specify monotonically + // increasing upper inclusive boundaries for the bucket counts with arbitrary + // widths for this histogram. In other words, custom_values represents custom, + // explicit bucketing that could have been converted from classic histograms. + // + // Those bounds are then referenced by spans in positive_spans with corresponding positive + // counts or deltas (refer to positive_spans for more details). This way we can + // encode sparse histograms with custom bucketing (many buckets are often + // not used). + // + // Note that for custom bounds, even negative observations are placed in the positive + // counts to simplify the implementation and avoid ambiguity of where to place + // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and + // the zero bucket are unused if the schema indicates custom bucketing. + // + // For each upper boundary, the previous boundary represents the lower exclusive + // boundary for that bucket. The first element is the upper inclusive boundary + // for the first bucket, which implicitly has a lower inclusive bound of -Inf. + // This is similar to "le" label semantics on classic histograms. You may add a + // bucket with an upper bound of 0 to make sure that you really have no negative + // observations, but in practice, native histogram rendering will treat the cases + // with and without a first upper boundary of 0 (and no negative counts) as the same. + // + // The last element is not only the upper inclusive bound of the last regular + // bucket, but implicitly the lower exclusive bound of the +Inf bucket. + repeated double custom_values = 16; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +}
diff --git a/prompb/io/prometheus/write/v2/types_test.go b/prompb/io/prometheus/write/v2/types_test.go new file mode 100644 index 0000000000..5b7622fc2f --- /dev/null +++ b/prompb/io/prometheus/write/v2/types_test.go @@ -0,0 +1,97 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/prompb" +) + +func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) { + expectedV1Empty := &prompb.WriteRequest{} + for _, tc := range []struct{ incoming *Request }{ + { + incoming: &Request{}, // Technically wrong: it should contain at least an empty string in symbols. + }, + { + incoming: &Request{ + Symbols: []string{""}, + }, // NOTE: Without reserved fields, failed with "corrupted" ghost TimeSeries element. + }, + { + incoming: &Request{ + Symbols: []string{"", "__name__", "metric1"}, + Timeseries: []TimeSeries{ + {LabelsRefs: []uint32{1, 2}}, + {Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}}, + }, // NOTE: Without reserved fields, proto: illegal wireType 7 + }, + }, + } { + t.Run("", func(t *testing.T) { + in, err := proto.Marshal(tc.incoming) + require.NoError(t, err) + + // Test accidental unmarshal of v2 payload with v1 proto. + out := &prompb.WriteRequest{} + require.NoError(t, proto.Unmarshal(in, out)) + + // Drop unknowns; we expect them when the incoming payload had some fields. + // This field & method will likely be gone after gogo removal. + out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables. + + require.Equal(t, expectedV1Empty, out) + }) + } +} + +func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) { + expectedV2Empty := &Request{} + for _, tc := range []struct{ incoming *prompb.WriteRequest }{ + { + incoming: &prompb.WriteRequest{}, + }, + { + incoming: &prompb.WriteRequest{ + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}}, + Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}, + }, + }, + }, + // NOTE: Without reserved fields, results in corrupted v2.Request.Symbols. + }, + } { + t.Run("", func(t *testing.T) { + in, err := proto.Marshal(tc.incoming) + require.NoError(t, err) + + // Test accidental unmarshal of v1 payload with v2 proto. + out := &Request{} + require.NoError(t, proto.Unmarshal(in, out)) + + // Drop unknowns; we expect them when the incoming payload had some fields. + // This field & method will likely be gone after gogo removal. + out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables. + + require.Equal(t, expectedV2Empty, out) + }) + } +} diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go new file mode 100644 index 0000000000..2ab95e0d19 --- /dev/null +++ b/prompb/rwcommon/codec_test.go @@ -0,0 +1,298 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rwcommon + +import ( + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" +) + +func TestToLabels(t *testing.T) { + expected := labels.FromStrings("__name__", "metric1", "foo", "bar") + + t.Run("v1", func(t *testing.T) { + ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}} + b := labels.NewScratchBuilder(2) + require.Equal(t, expected, ts.ToLabels(&b, nil)) + require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil)) + require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels)) + }) + t.Run("v2", func(t *testing.T) { + v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"} + ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}} + b := labels.NewScratchBuilder(2) + require.Equal(t, expected, ts.ToLabels(&b, v2Symbols)) + // No need for FromLabels in our prod code as we use the symbol table to do so.
+ }) +} + +func TestFromMetadataType(t *testing.T) { + for _, tc := range []struct { + desc string + input model.MetricType + expectedV1 prompb.MetricMetadata_MetricType + expectedV2 writev2.Metadata_MetricType + }{ + { + desc: "with a single-word metric", + input: model.MetricTypeCounter, + expectedV1: prompb.MetricMetadata_COUNTER, + expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER, + }, + { + desc: "with a two-word metric", + input: model.MetricTypeStateset, + expectedV1: prompb.MetricMetadata_STATESET, + expectedV2: writev2.Metadata_METRIC_TYPE_STATESET, + }, + { + desc: "with an unknown metric", + input: "not-known", + expectedV1: prompb.MetricMetadata_UNKNOWN, + expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + t.Run("v1", func(t *testing.T) { + require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input)) + }) + t.Run("v2", func(t *testing.T) { + require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input)) + }) + }) + } +} + +func TestToMetadata(t *testing.T) { + sym := writev2.NewSymbolTable() + + for _, tc := range []struct { + input writev2.Metadata + expected metadata.Metadata + }{ + { + input: writev2.Metadata{}, + expected: metadata.Metadata{ + Type: model.MetricTypeUnknown, + }, + }, + { + input: writev2.Metadata{ + Type: 12414, // Unknown. + }, + expected: metadata.Metadata{ + Type: model.MetricTypeUnknown, + }, + }, + { + input: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: sym.Symbolize("help1"), + UnitRef: sym.Symbolize("unit1"), + }, + expected: metadata.Metadata{ + Type: model.MetricTypeCounter, + Help: "help1", + Unit: "unit1", + }, + }, + { + input: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_STATESET, + HelpRef: sym.Symbolize("help2"), + }, + expected: metadata.Metadata{ + Type: model.MetricTypeStateset, + Help: "help2", + }, + }, + } { + t.Run("", func(t *testing.T) { + ts := writev2.TimeSeries{Metadata: tc.input} + require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols())) + }) + } +} + +func TestToHistogram_Empty(t *testing.T) { + t.Run("v1", func(t *testing.T) { + require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "") + require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "") + }) + t.Run("v2", func(t *testing.T) { + require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "") + require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "") + }) +} + +// NOTE(bwplotka): This is technically not a valid histogram, but it represents +// important cases to test when copying or converting to/from int/float histograms. +func testIntHistogram() histogram.Histogram { + return histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + Count: 19, + Sum: 2.7, + ZeroThreshold: 1e-128, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + CustomValues: []float64{21421, 523}, + } +}
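The integer fixture above stores bucket counts as deltas (the positive_deltas/negative_deltas representation), while its float twin below stores absolute counts. A small sketch of the conversion, using the fixture's own numbers (`expandDeltas` is illustrative, not a helper from this PR):

```go
package main

import "fmt"

// expandDeltas converts delta-encoded bucket counts (each entry relative
// to the previous bucket, or to zero for the first) into absolute counts.
func expandDeltas(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur int64
	for i, d := range deltas {
		cur += d
		counts[i] = float64(cur)
	}
	return counts
}

func main() {
	fmt.Println(expandDeltas([]int64{1, 2, -2, 1, -1, 0, 0}))
	// [1 3 1 2 1 1 1], exactly the PositiveBuckets of the float fixture below.
}
```

+// NOTE(bwplotka): This is technically not a valid histogram, but it represents +// important cases to test when copying or converting to/from int/float histograms.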
+func testFloatHistogram() histogram.FloatHistogram { + return histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + Count: 19, + Sum: 2.7, + ZeroThreshold: 1e-128, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []float64{1, 3, 1, 2, 1, 1}, + CustomValues: []float64{21421, 523}, + } +} + +func TestFromIntToFloatOrIntHistogram(t *testing.T) { + t.Run("v1", func(t *testing.T) { + // v1 does not support nhcb. + testIntHistWithoutNHCB := testIntHistogram() + testIntHistWithoutNHCB.CustomValues = nil + testFloatHistWithoutNHCB := testFloatHistogram() + testFloatHistWithoutNHCB.CustomValues = nil + + h := prompb.FromIntHistogram(123, &testIntHistWithoutNHCB) + require.False(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Equal(t, testIntHistWithoutNHCB, *h.ToIntHistogram()) + require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram()) + }) + t.Run("v2", func(t *testing.T) { + testIntHist := testIntHistogram() + testFloatHist := testFloatHistogram() + + h := writev2.FromIntHistogram(123, &testIntHist) + require.False(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Equal(t, testIntHist, *h.ToIntHistogram()) + require.Equal(t, testFloatHist, *h.ToFloatHistogram()) + }) +} + +func TestFromFloatToFloatHistogram(t *testing.T) { + t.Run("v1", func(t *testing.T) { + // v1 does not support nhcb. + testFloatHistWithoutNHCB := testFloatHistogram() + testFloatHistWithoutNHCB.CustomValues = nil + + h := prompb.FromFloatHistogram(123, &testFloatHistWithoutNHCB) + require.True(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Nil(t, h.ToIntHistogram()) + require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram()) + }) + t.Run("v2", func(t *testing.T) { + testFloatHist := testFloatHistogram() + + h := writev2.FromFloatHistogram(123, &testFloatHist) + require.True(t, h.IsFloatHistogram()) + require.Equal(t, int64(123), h.Timestamp) + require.Nil(t, h.ToIntHistogram()) + require.Equal(t, testFloatHist, *h.ToFloatHistogram()) + }) +} + +func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) { + for _, tc := range []struct { + input histogram.CounterResetHint + expectedV1 prompb.Histogram_ResetHint + expectedV2 writev2.Histogram_ResetHint + }{ + { + input: histogram.UnknownCounterReset, + expectedV1: prompb.Histogram_UNKNOWN, + expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED, + }, + { + input: histogram.CounterReset, + expectedV1: prompb.Histogram_YES, + expectedV2: writev2.Histogram_RESET_HINT_YES, + }, + { + input: histogram.NotCounterReset, + expectedV1: prompb.Histogram_NO, + expectedV2: writev2.Histogram_RESET_HINT_NO, + }, + { + input: histogram.GaugeType, + expectedV1: prompb.Histogram_GAUGE, + expectedV2: writev2.Histogram_RESET_HINT_GAUGE, + }, + } { + t.Run("", func(t *testing.T) { + t.Run("v1", func(t *testing.T) { + h := testIntHistogram() + h.CounterResetHint = tc.input + got := prompb.FromIntHistogram(1337, &h) + require.Equal(t, tc.expectedV1, got.GetResetHint()) + + fh := testFloatHistogram() + fh.CounterResetHint = tc.input + got2 := prompb.FromFloatHistogram(1337, &fh) + require.Equal(t, tc.expectedV1, got2.GetResetHint()) + }) + t.Run("v2", func(t *testing.T) { + h := testIntHistogram() + h.CounterResetHint 
= tc.input + got := writev2.FromIntHistogram(1337, &h) + require.Equal(t, tc.expectedV2, got.GetResetHint()) + + fh := testFloatHistogram() + fh.CounterResetHint = tc.input + got2 := writev2.FromFloatHistogram(1337, &fh) + require.Equal(t, tc.expectedV2, got2.GetResetHint()) + }) + }) + } +} diff --git a/promql/bench_test.go b/promql/bench_test.go index 516b0d7482..bd67280294 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package promql +package promql_test import ( "context" @@ -23,13 +23,14 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" ) -func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error { +func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, interval, numIntervals int) error { ctx := context.Background() metrics := []labels.Labels{} @@ -186,6 +187,21 @@ func rangeQueryCases() []benchCase { { expr: "topk(5, a_X)", }, + { + expr: "limitk(1, a_X)", + }, + { + expr: "limitk(5, a_X)", + }, + { + expr: "limit_ratio(0.1, a_X)", + }, + { + expr: "limit_ratio(0.5, a_X)", + }, + { + expr: "limit_ratio(-0.5, a_X)", + }, // Combinations. { expr: "rate(a_X[1m]) + rate(b_X[1m])", @@ -249,13 +265,13 @@ func BenchmarkRangeQuery(b *testing.B) { stor := teststorage.New(b) stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings. defer stor.Close() - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 50000000, Timeout: 100 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps. @@ -322,9 +338,17 @@ func BenchmarkNativeHistograms(b *testing.B) { name: "sum rate with long rate interval", query: "sum(rate(native_histogram_series[20m]))", }, + { + name: "histogram_count with short rate interval", + query: "histogram_count(sum(rate(native_histogram_series[2m])))", + }, + { + name: "histogram_count with long rate interval", + query: "histogram_count(sum(rate(native_histogram_series[20m])))", + }, } - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 50000000, @@ -338,7 +362,7 @@ func BenchmarkNativeHistograms(b *testing.B) { for _, tc := range cases { b.Run(tc.name, func(b *testing.B) { - ng := NewEngine(opts) + ng := promql.NewEngine(opts) for i := 0; i < b.N; i++ { qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) if err != nil { diff --git a/promql/engine.go b/promql/engine.go index b8a8ea0959..25e67db633 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -573,7 +573,8 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { return validationErr } -func (ng *Engine) newTestQuery(f func(context.Context) error) Query { +// NewTestQuery: inject special behaviour into Query for testing. 
+func (ng *Engine) NewTestQuery(f func(context.Context) error) Query { qry := &query{ q: "test statement", stmt: parser.TestStmt(f), @@ -751,6 +752,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval case parser.ValueTypeScalar: return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil case parser.ValueTypeMatrix: + ng.sortMatrixResult(ctx, query, mat) return mat, warnings, nil default: panic(fmt.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type())) @@ -789,11 +791,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } // TODO(fabxc): where to ensure metric labels are a copy from the storage internals. + ng.sortMatrixResult(ctx, query, mat) + + return mat, warnings, nil +} + +func (ng *Engine) sortMatrixResult(ctx context.Context, query *query, mat Matrix) { sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort) sort.Sort(mat) sortSpanTimer.Finish() - - return mat, warnings, nil } // subqueryTimes returns the sum of offsets and ranges of all subqueries in the path. @@ -979,6 +985,11 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations return nil, nil } series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet) + if e.SkipHistogramBuckets { + for i := range series { + series[i] = newHistogramStatsSeries(series[i]) + } + } e.Series = series return ws, err } @@ -1307,7 +1318,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping index, ok := groupToResultIndex[groupingKey] // Add a new group if it doesn't exist. if !ok { - if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK { + if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK && aggExpr.Op != parser.LIMITK && aggExpr.Op != parser.LIMIT_RATIO { m := generateGroupingLabels(enh, series.Metric, aggExpr.Without, sortedGrouping) result = append(result, Series{Metric: m}) } @@ -1320,9 +1331,10 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping groups := make([]groupedAggregation, groupCount) var k int + var ratio float64 var seriess map[uint64]Series switch aggExpr.Op { - case parser.TOPK, parser.BOTTOMK: + case parser.TOPK, parser.BOTTOMK, parser.LIMITK: if !convertibleToInt64(param) { ev.errorf("Scalar value %v overflows int64", param) } @@ -1334,6 +1346,23 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping return nil, warnings } seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. + case parser.LIMIT_RATIO: + if math.IsNaN(param) { + ev.errorf("Ratio value %v is NaN", param) + } + switch { + case param == 0: + return nil, warnings + case param < -1.0: + ratio = -1.0 + warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) + case param > 1.0: + ratio = 1.0 + warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) + default: + ratio = param + } + seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. 
case parser.QUANTILE: if math.IsNaN(param) || param < 0 || param > 1 { warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange())) @@ -1351,11 +1380,12 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping enh.Ts = ts var ws annotations.Annotations switch aggExpr.Op { - case parser.TOPK, parser.BOTTOMK: - result, ws = ev.aggregationK(aggExpr, k, inputMatrix, seriesToResult, groups, enh, seriess) + case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: + result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess) // If this could be an instant query, shortcut so as not to change sort order. if ev.endTimestamp == ev.startTimestamp { - return result, ws + warnings.Merge(ws) + return result, warnings } default: ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh) @@ -1370,7 +1400,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping // Assemble the output matrix. By the time we get here we know we don't have too many samples. switch aggExpr.Op { - case parser.TOPK, parser.BOTTOMK: + case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: result = make(Matrix, 0, len(seriess)) for _, ss := range seriess { result = append(result, ss) @@ -1782,18 +1812,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio }, e.LHS, e.RHS) default: return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) + return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } @@ -2024,25 +2057,21 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec vec := make(Vector, 0, len(vs.Series)) for i, s := range vs.Series { it := seriesIterators[i] - t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) - if ok { - vec = append(vec, Sample{ - Metric: s.Labels(), - T: t, - F: f, - H: h, - }) - histSize := 0 - if h != nil { - histSize := h.Size() / 16 // 16 bytes per sample. 
- ev.currentSamples += histSize - } - ev.currentSamples++ + t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + if !ok { + continue + } - ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, int64(1+histSize)) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + // Note that we ignore the sample values because the call only cares about the timestamp. + vec = append(vec, Sample{ + Metric: s.Labels(), + T: t, + }) + + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } } ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -2430,12 +2459,12 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi } // VectorBinop evaluates a binary operation between two Vectors, excluding set operators. -func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) { if matching.Card == parser.CardManyToMany { panic("many-to-many only allowed for set operators") } if len(lhs) == 0 || len(rhs) == 0 { - return nil // Short-circuit: nothing is going to match. + return nil, nil // Short-circuit: nothing is going to match. } // The control flow below handles one-to-one or many-to-one matching. @@ -2488,6 +2517,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // For all lhs samples find a respective rhs sample and perform // the binary operation. + var lastErr error for i, ls := range lhs { sig := lhsh[i].signature @@ -2503,7 +2533,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * fl, fr = fr, fl hl, hr = hr, hl } - floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) + floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr) + if err != nil { + lastErr = err + } switch { case returnBool: if keep { @@ -2545,7 +2578,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * H: histogramValue, }) } - return enh.Out + return enh.Out, lastErr } func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string { @@ -2608,7 +2641,8 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. -func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) { + var lastErr error for _, lhsSample := range lhs { lf, rf := lhsSample.F, rhs.V var rh *histogram.FloatHistogram @@ -2619,7 +2653,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lf, rf = rf, lf lh, rh = rh, lh } - float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) + float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh) + if err != nil { + lastErr = err + } // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS.
if op.IsComparisonOperator() && swap { @@ -2643,7 +2680,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala enh.Out = append(enh.Out, lhsSample) } } - return enh.Out + return enh.Out, lastErr } // scalarBinop evaluates a binary operation between two Scalars. @@ -2680,62 +2717,71 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. -func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { switch op { case parser.ADD: if hlhs != nil && hrhs != nil { - return 0, hlhs.Copy().Add(hrhs).Compact(0), true + res, err := hlhs.Copy().Add(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil } - return lhs + rhs, nil, true + return lhs + rhs, nil, true, nil case parser.SUB: if hlhs != nil && hrhs != nil { - return 0, hlhs.Copy().Sub(hrhs).Compact(0), true + res, err := hlhs.Copy().Sub(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil } - return lhs - rhs, nil, true + return lhs - rhs, nil, true, nil case parser.MUL: if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Mul(rhs), true + return 0, hlhs.Copy().Mul(rhs), true, nil } if hlhs == nil && hrhs != nil { - return 0, hrhs.Copy().Mul(lhs), true + return 0, hrhs.Copy().Mul(lhs), true, nil } - return lhs * rhs, nil, true + return lhs * rhs, nil, true, nil case parser.DIV: if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Div(rhs), true + return 0, hlhs.Copy().Div(rhs), true, nil } - return lhs / rhs, nil, true + return lhs / rhs, nil, true, nil case parser.POW: - return math.Pow(lhs, rhs), nil, true + return math.Pow(lhs, rhs), nil, true, nil case parser.MOD: - return math.Mod(lhs, rhs), nil, true + return math.Mod(lhs, rhs), nil, true, nil case parser.EQLC: - return lhs, nil, lhs == rhs + return lhs, nil, lhs == rhs, nil case parser.NEQ: - return lhs, nil, lhs != rhs + return lhs, nil, lhs != rhs, nil case parser.GTR: - return lhs, nil, lhs > rhs + return lhs, nil, lhs > rhs, nil case parser.LSS: - return lhs, nil, lhs < rhs + return lhs, nil, lhs < rhs, nil case parser.GTE: - return lhs, nil, lhs >= rhs + return lhs, nil, lhs >= rhs, nil case parser.LTE: - return lhs, nil, lhs <= rhs + return lhs, nil, lhs <= rhs, nil case parser.ATAN2: - return math.Atan2(lhs, rhs), nil, true + return math.Atan2(lhs, rhs), nil, true, nil } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } type groupedAggregation struct { - seen bool // Was this output groups seen in the input at this timestamp. - hasFloat bool // Has at least 1 float64 sample aggregated. - hasHistogram bool // Has at least 1 histogram sample aggregated. - floatValue float64 - histogramValue *histogram.FloatHistogram - floatMean float64 - groupCount int - heap vectorByValueHeap + seen bool // Was this output group seen in the input at this timestamp. + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + floatValue float64 + histogramValue *histogram.FloatHistogram + floatMean float64 // Mean, or "compensating value" for Kahan summation. + groupCount int + groupAggrComplete bool // Used by LIMITK to short-cut the series loop when we've reached k elements in every group. + heap vectorByValueHeap } // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix. @@ -2761,11 +2807,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix *group = groupedAggregation{ seen: true, floatValue: f, - floatMean: f, groupCount: 1, } switch op { - case parser.SUM, parser.AVG: + case parser.AVG: + group.floatMean = f + fallthrough + case parser.SUM: if h == nil { group.hasFloat = true } else { @@ -2773,6 +2821,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: + group.floatMean = f group.floatValue = 0 case parser.QUANTILE: group.heap = make(vectorByValueHeap, 1) @@ -2788,14 +2837,17 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h != nil { group.hasHistogram = true if group.histogramValue != nil { - group.histogramValue.Add(h) + _, err := group.histogramValue.Add(h) + if err != nil { + handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + } } // Otherwise the aggregation contained floats // previously and will be invalid anyway. No // point in copying the histogram in that case. } else { group.hasFloat = true - group.floatValue += f + group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean) } case parser.AVG: @@ -2805,8 +2857,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if group.histogramValue != nil { left := h.Copy().Div(float64(group.groupCount)) right := group.histogramValue.Copy().Div(float64(group.groupCount)) - toAdd := left.Sub(right) - group.histogramValue.Add(toAdd) + toAdd, err := left.Sub(right) + if err != nil { + handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + } + _, err = group.histogramValue.Add(toAdd) + if err != nil { + handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + } } // Otherwise the aggregation contained floats // previously and will be invalid anyway. No @@ -2906,6 +2964,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix } if aggr.hasHistogram { aggr.histogramValue.Compact(0) + } else { + aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term. } default: // For other aggregations, we already have the right value.
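The sum and avg paths above now accumulate floats with Kahan compensated summation via kahanSumInc, reusing floatMean as the compensation term that is folded back in at the end. A self-contained sketch of the technique, in the standard Neumaier formulation (the engine's own helper definition is not shown in this diff):

```go
package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while tracking the rounding error in c
// (Neumaier's variant: swap roles when the increment dominates).
// The caller adds c back to sum once at the end.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc // Low-order bits of inc were lost.
	} else {
		c += (inc - t) + sum // Low-order bits of sum were lost.
	}
	return t, c
}

func main() {
	var sum, c float64
	for _, f := range []float64{1e16, 1, -1e16} {
		sum, c = kahanSumInc(f, sum, c)
	}
	fmt.Println(sum + c) // 1; a plain += loop loses the middle term.
}
```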
@@ -2918,19 +2978,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix return annos } -// aggregationK evaluates topk or bottomk at one timestep on inputMatrix. +// aggregationK evaluates topk, bottomk, limitk, or limit_ratio at one timestep on inputMatrix. // Output that has the same labels as the input, but just k of them per group. // seriesToResult maps inputMatrix indexes to groups indexes. -// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk. +// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio. // For a range query, aggregates output in the seriess map. -func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { +func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { op := e.Op var s Sample var annos annotations.Annotations + // Used to short-cut the loop for LIMITK if we already collected k elements for every group. + groupsRemaining := len(groups) for i := range groups { groups[i].seen = false } +seriesLoop: for si := range inputMatrix { f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) if !ok { @@ -2941,11 +3004,23 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma group := &groups[seriesToResult[si]] // Initialize this group if it's the first time we've seen it. if !group.seen { + // LIMIT_RATIO is a special case, as we may not add this very sample to the heap, + // while we also don't know its final size. + if op == parser.LIMIT_RATIO { + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 0), + } + if ratiosampler.AddRatioSample(r, &s) { + heap.Push(&group.heap, &s) + } + } else { + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 1, k), + } + group.heap[0] = s } - *group = groupedAggregation{ - seen: true, - heap: make(vectorByValueHeap, 1, k), - } - group.heap[0] = s continue } @@ -2976,6 +3051,26 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma } } + + case parser.LIMITK: + if len(group.heap) < k { + heap.Push(&group.heap, &s) + } + // LIMITK optimization: early break if we've added k elements to _every_ group, + // especially useful for large timeseries where the user is exploring labels via e.g. + // limitk(10, my_metric) + if !group.groupAggrComplete && len(group.heap) == k { + group.groupAggrComplete = true + groupsRemaining-- + if groupsRemaining == 0 { + break seriesLoop + } + } + + case parser.LIMIT_RATIO: + if ratiosampler.AddRatioSample(r, &s) { + heap.Push(&group.heap, &s) + } + default: panic(fmt.Errorf("expected aggregation operator but got %q", op)) } @@ -3025,6 +3120,11 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma for _, v := range aggr.heap { add(v.Metric, v.F) } + + case parser.LIMITK, parser.LIMIT_RATIO: + for _, v := range aggr.heap { + add(v.Metric, v.F) + } } } @@ -3103,6 +3203,31 @@ func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogr return f, h, true } +// handleAggregationError adds the appropriate annotation based on the aggregation error. +func handleAggregationError(err error, e *parser.AggregateExpr, metricName string, annos *annotations.Annotations) { + pos := e.Expr.PositionRange() + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } +} + +// handleVectorBinopError returns the appropriate annotation based on the vector binary operation error.
+func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotations { + if err == nil { + return nil + } + metricName := "" + pos := e.PositionRange() + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + } + return nil +} + // groupingKey builds and returns the grouping key for the given metric and // grouping labels. func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) { @@ -3182,6 +3307,8 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr { // PreprocessExpr wraps all possible step invariant parts of the given expression with // StepInvariantExpr. It also resolves the preprocessors. func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { + detectHistogramStatsDecoding(expr) + isStepInvariant := preprocessExprHelper(expr, start, end) if isStepInvariant { return newStepInvariantExpr(expr) @@ -3316,8 +3443,106 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) { }) } +// detectHistogramStatsDecoding modifies the expression by setting the +// SkipHistogramBuckets field in those vector selectors for which it is safe to +// return only histogram statistics (sum and count), excluding histogram spans +// and buckets. The function can be treated as an optimization and is not +// required for correctness. +func detectHistogramStatsDecoding(expr parser.Expr) { + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + if n, ok := node.(*parser.BinaryExpr); ok { + detectHistogramStatsDecoding(n.LHS) + detectHistogramStatsDecoding(n.RHS) + return fmt.Errorf("stop") + } + + n, ok := (node).(*parser.VectorSelector) + if !ok { + return nil + } + + for _, p := range path { + call, ok := p.(*parser.Call) + if !ok { + continue + } + if call.Func.Name == "histogram_count" || call.Func.Name == "histogram_sum" { + n.SkipHistogramBuckets = true + break + } + if call.Func.Name == "histogram_quantile" || call.Func.Name == "histogram_fraction" { + n.SkipHistogramBuckets = false + break + } + } + return fmt.Errorf("stop") + }) +} + func makeInt64Pointer(val int64) *int64 { valp := new(int64) *valp = val return valp } + +// RatioSampler is an interface introduced to allow unit-testing (previously: Randomizer). +type RatioSampler interface { + // Returns this sample's "offset" within [0.0, 1.0]. + sampleOffset(ts int64, sample *Sample) float64 + AddRatioSample(r float64, sample *Sample) bool +} + +// Use Hash(labels.String()) / maxUint64 as a "deterministic" +// value in [0.0, 1.0]. +type HashRatioSampler struct{} + +var ratiosampler RatioSampler = NewHashRatioSampler() + +func NewHashRatioSampler() *HashRatioSampler { + return &HashRatioSampler{} +} + +func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 { + const ( + float64MaxUint64 = float64(math.MaxUint64) + ) + return float64(sample.Metric.Hash()) / float64MaxUint64 +} + +func (s *HashRatioSampler) AddRatioSample(ratioLimit float64, sample *Sample) bool { + // If ratioLimit >= 0: add sample if sampleOffset is less than ratioLimit + // + // 0.0 ratioLimit 1.0 + // [---------|--------------------------] + // [#########...........................] + // + // e.g.: + // sampleOffset==0.3 && ratioLimit==0.4 + // 0.3 < 0.4 ? --> add sample + // + // Else if ratioLimit < 0: add sample if rand() returns the "complement" of the ratioLimit>=0 case + // (loosely similar behavior to negative array index in other programming languages) + // + // 0.0 1+ratioLimit 1.0 + // [---------|--------------------------] + // [.........###########################] + // + // e.g.: + // sampleOffset==0.3 && ratioLimit==-0.6 + // 0.3 >= 0.4 ? --> don't add sample + sampleOffset := s.sampleOffset(sample.T, sample) + return (ratioLimit >= 0 && sampleOffset < ratioLimit) || + (ratioLimit < 0 && sampleOffset >= (1.0+ratioLimit)) +} + +type histogramStatsSeries struct { + storage.Series +} + +func newHistogramStatsSeries(series storage.Series) *histogramStatsSeries { + return &histogramStatsSeries{Series: series} +} + +func (s histogramStatsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + return NewHistogramStatsIterator(s.Series.Iterator(it)) +} diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go new file mode 100644 index 0000000000..cb501b2fdf --- /dev/null +++ b/promql/engine_internal_test.go @@ -0,0 +1,82 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "errors" + "testing" + + "github.com/go-kit/log" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/util/annotations" +) + +func TestRecoverEvaluatorRuntime(t *testing.T) { + var output []interface{} + logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { + output = append(output, keyvals...) + return nil + })) + ev := &evaluator{logger: logger} + + expr, _ := parser.ParseExpr("sum(up)") + + var err error + + defer func() { + require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") + require.Contains(t, output, "sum(up)") + }() + defer ev.recover(expr, nil, &err) + + // Cause a runtime panic. + var a []int + a[123] = 1 +} + +func TestRecoverEvaluatorError(t *testing.T) { + ev := &evaluator{logger: log.NewNopLogger()} + var err error + + e := errors.New("custom error") + + defer func() { + require.EqualError(t, err, e.Error()) + }() + defer ev.recover(nil, nil, &err) + + panic(e) +} + +func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { + ev := &evaluator{logger: log.NewNopLogger()} + var err error + var ws annotations.Annotations + + warnings := annotations.New().Add(errors.New("custom warning")) + e := errWithWarnings{ + err: errors.New("custom error"), + warnings: warnings, + } + + defer func() { + require.EqualError(t, err, e.Error()) + require.Equal(t, warnings, ws, "wrong warning message") + }() + defer ev.recover(nil, &ws, &err) + + panic(e) +}
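A worked example of the AddRatioSample rule above (standalone sketch; the real sampler derives sampleOffset deterministically from the series' label hash, so the same series is always kept or dropped):

```go
package main

import "fmt"

// addRatioSample reproduces the decision rule documented above:
// a non-negative ratio keeps offsets in [0, ratio); a negative ratio
// keeps the complementary band [1+ratio, 1).
func addRatioSample(ratioLimit, sampleOffset float64) bool {
	return (ratioLimit >= 0 && sampleOffset < ratioLimit) ||
		(ratioLimit < 0 && sampleOffset >= (1.0+ratioLimit))
}

func main() {
	fmt.Println(addRatioSample(0.4, 0.3))  // true:  0.3 < 0.4
	fmt.Println(addRatioSample(-0.6, 0.3)) // false: 0.3 < 1.0-0.6
	// Since the offset is a deterministic hash of the series labels,
	// limit_ratio(0.4, m) and limit_ratio(-0.6, m) select complementary,
	// non-overlapping subsets that together cover every series of m.
}
```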
-package promql +package promql_test import ( "context" @@ -20,28 +20,37 @@ import ( "math" "os" "sort" + "strconv" + "sync" "testing" "time" - "github.com/go-kit/log" - "github.com/stretchr/testify/require" "go.uber.org/goleak" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) +const ( + env = "query execution" + defaultLookbackDelta = 5 * time.Minute + defaultEpsilon = 0.000001 // Relative error allowed for sample values. +) + func TestMain(m *testing.M) { + // Enable experimental functions testing + parser.EnableExperimentalFunctions = true goleak.VerifyTestMain(m) } @@ -51,10 +60,12 @@ func TestQueryConcurrency(t *testing.T) { dir, err := os.MkdirTemp("", "test_concurrency") require.NoError(t, err) defer os.RemoveAll(dir) - queryTracker := NewActiveQueryTracker(dir, maxConcurrency, nil) - t.Cleanup(queryTracker.Close) + queryTracker := promql.NewActiveQueryTracker(dir, maxConcurrency, nil) + t.Cleanup(func() { + require.NoError(t, queryTracker.Close()) + }) - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, @@ -62,7 +73,7 @@ func TestQueryConcurrency(t *testing.T) { ActiveQueryTracker: queryTracker, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -84,9 +95,14 @@ func TestQueryConcurrency(t *testing.T) { return nil } + var wg sync.WaitGroup for i := 0; i < maxConcurrency; i++ { - q := engine.newTestQuery(f) - go q.Exec(ctx) + q := engine.NewTestQuery(f) + wg.Add(1) + go func() { + q.Exec(ctx) + wg.Done() + }() select { case <-processing: // Expected. @@ -95,8 +111,12 @@ func TestQueryConcurrency(t *testing.T) { } } - q := engine.newTestQuery(f) - go q.Exec(ctx) + q := engine.NewTestQuery(f) + wg.Add(1) + go func() { + q.Exec(ctx) + wg.Done() + }() select { case <-processing: @@ -119,20 +139,37 @@ func TestQueryConcurrency(t *testing.T) { for i := 0; i < maxConcurrency; i++ { block <- struct{}{} } + + wg.Wait() +} + +// contextDone returns an error if the context was canceled or timed out. 
+func contextDone(ctx context.Context, env string) error { + if err := ctx.Err(); err != nil { + switch { + case errors.Is(err, context.Canceled): + return promql.ErrQueryCanceled(env) + case errors.Is(err, context.DeadlineExceeded): + return promql.ErrQueryTimeout(env) + default: + return err + } + } + return nil } func TestQueryTimeout(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 5 * time.Millisecond, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { time.Sleep(100 * time.Millisecond) return contextDone(ctx, "test statement execution") }) @@ -140,20 +177,20 @@ func TestQueryTimeout(t *testing.T) { res := query.Exec(ctx) require.Error(t, res.Err, "expected timeout error but got none") - var e ErrQueryTimeout + var e promql.ErrQueryTimeout require.ErrorAs(t, res.Err, &e, "expected timeout error but got: %s", res.Err) } -const errQueryCanceled = ErrQueryCanceled("test statement execution") +const errQueryCanceled = promql.ErrQueryCanceled("test statement execution") func TestQueryCancel(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -161,13 +198,13 @@ func TestQueryCancel(t *testing.T) { block := make(chan struct{}) processing := make(chan struct{}) - query1 := engine.newTestQuery(func(ctx context.Context) error { + query1 := engine.NewTestQuery(func(ctx context.Context) error { processing <- struct{}{} <-block return contextDone(ctx, "test statement execution") }) - var res *Result + var res *promql.Result go func() { res = query1.Exec(ctx) @@ -183,7 +220,7 @@ func TestQueryCancel(t *testing.T) { require.Equal(t, errQueryCanceled, res.Err) // Canceling a query before starting it must have no effect. 
- query2 := engine.newTestQuery(func(ctx context.Context) error { + query2 := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) @@ -221,14 +258,14 @@ func (e errSeriesSet) Err() error { return e.err } func (e errSeriesSet) Warnings() annotations.Annotations { return nil } func TestQueryError(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) - errStorage := ErrStorage{errors.New("storage error")} + engine := promql.NewEngine(opts) + errStorage := promql.ErrStorage{errors.New("storage error")} queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &errQuerier{err: errStorage}, nil }) @@ -270,7 +307,7 @@ func (h *hintRecordingQuerier) Select(ctx context.Context, sortSeries bool, hint } func TestSelectHintsSetCorrectly(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, @@ -561,11 +598,11 @@ func TestSelectHintsSetCorrectly(t *testing.T) { }, } { t.Run(tc.query, func(t *testing.T) { - engine := NewEngine(opts) + engine := promql.NewEngine(opts) hintsRecorder := &noopHintRecordingQueryable{} var ( - query Query + query promql.Query err error ) ctx := context.Background() @@ -586,13 +623,13 @@ func TestSelectHintsSetCorrectly(t *testing.T) { } func TestEngineShutdown(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) block := make(chan struct{}) @@ -605,13 +642,13 @@ func TestEngineShutdown(t *testing.T) { <-block return contextDone(ctx, "test statement execution") } - query1 := engine.newTestQuery(f) + query1 := engine.NewTestQuery(f) // Stopping the engine must cancel the base context. While executing queries is // still possible, their context is canceled from the beginning and execution should // terminate immediately. - var res *Result + var res *promql.Result go func() { res = query1.Exec(ctx) processing <- struct{}{} @@ -625,7 +662,7 @@ func TestEngineShutdown(t *testing.T) { require.Error(t, res.Err, "expected error on shutdown during query but got none") require.Equal(t, errQueryCanceled, res.Err) - query2 := engine.newTestQuery(func(context.Context) error { + query2 := engine.NewTestQuery(func(context.Context) error { require.FailNow(t, "reached query execution unexpectedly") return nil }) @@ -635,12 +672,12 @@ func TestEngineShutdown(t *testing.T) { res2 := query2.Exec(ctx) require.Error(t, res2.Err, "expected error on querying with canceled context but got none") - var e ErrQueryCanceled + var e promql.ErrQueryCanceled require.ErrorAs(t, res2.Err, &e, "expected cancellation error but got: %s", res2.Err) } func TestEngineEvalStmtTimestamps(t *testing.T) { - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric 1 2 `) @@ -657,13 +694,13 @@ load 10s // Instant queries. 
{ Query: "1", - Result: Scalar{V: 1, T: 1000}, + Result: promql.Scalar{V: 1, T: 1000}, Start: time.Unix(1, 0), }, { Query: "metric", - Result: Vector{ - Sample{ + Result: promql.Vector{ + promql.Sample{ F: 1, T: 1000, Metric: labels.FromStrings("__name__", "metric"), @@ -673,9 +710,9 @@ load 10s }, { Query: "metric[20s]", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -684,9 +721,9 @@ load 10s // Range queries. { Query: "1", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.EmptyLabels(), }, }, @@ -696,9 +733,9 @@ load 10s }, { Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -708,9 +745,9 @@ load 10s }, { Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -727,7 +764,7 @@ load 10s for i, c := range cases { t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) { var err error - var qry Query + var qry promql.Query engine := newTestEngine() if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) @@ -749,7 +786,7 @@ load 10s } func TestQueryStatistics(t *testing.T) { - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metricWith1SampleEvery10Seconds 1+1x100 metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100 @@ -799,10 +836,10 @@ load 10s { Query: "metricWith1HistogramEvery10Seconds", Start: time.Unix(21, 0), - PeakSamples: 12, - TotalSamples: 12, // 1 histogram sample of size 12 / 10 seconds + PeakSamples: 13, + TotalSamples: 13, // 1 histogram HPoint of size 13 / 10 seconds TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 21000: 12, + 21000: 13, }, }, { @@ -818,8 +855,8 @@ load 10s { Query: "timestamp(metricWith1HistogramEvery10Seconds)", Start: time.Unix(21, 0), - PeakSamples: 13, // histogram size 12 + 1 extra because of timestamp - TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds + PeakSamples: 2, + TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds TotalSamplesPerStep: stats.TotalSamplesPerStep{ 21000: 1, }, @@ -899,10 +936,10 @@ load 10s { Query: "metricWith1HistogramEvery10Seconds[60s]", Start: time.Unix(201, 0), - PeakSamples: 72, - TotalSamples: 72, // 1 histogram (size 12) / 10 seconds * 60 seconds + PeakSamples: 78, + TotalSamples: 78, // 1 histogram (size 13 HPoint) / 10 seconds * 60 seconds TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 72, + 201000: 78, }, }, { @@ -929,11 +966,11 @@ load 10s { Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]", Start: time.Unix(201, 0), - PeakSamples: 72, - TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as + PeakSamples: 78, + TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 
seconds) * 4 + 2 * 13 as // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 312, + 201000: 338, }, }, { @@ -948,10 +985,10 @@ load 10s { Query: "metricWith1HistogramEvery10Seconds[60s] @ 30", Start: time.Unix(201, 0), - PeakSamples: 48, - TotalSamples: 48, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series + PeakSamples: 52, + TotalSamples: 52, // The @ modifier forces the evaluation at 30 seconds, so it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series of size-13 HPoints TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 201000: 48, + 201000: 52, }, }, { @@ -1086,13 +1123,13 @@ load 10s Start: time.Unix(204, 0), End: time.Unix(223, 0), Interval: 5 * time.Second, - PeakSamples: 48, - TotalSamples: 48, // 1 histogram (size 12) per query * 4 steps + PeakSamples: 52, + TotalSamples: 52, // 1 histogram (size 13 HPoint) per query * 4 steps TotalSamplesPerStep: stats.TotalSamplesPerStep{ - 204000: 12, // aligned to the step time, not the sample time - 209000: 12, - 214000: 12, - 219000: 12, + 204000: 13, // aligned to the step time, not the sample time + 209000: 13, + 214000: 13, + 219000: 13, }, }, { @@ -1116,7 +1153,7 @@ load 10s Start: time.Unix(201, 0), End: time.Unix(220, 0), Interval: 5 * time.Second, - PeakSamples: 16, + PeakSamples: 5, TotalSamples: 4, // 1 sample per query * 4 steps TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 1, @@ -1267,17 +1304,14 @@ load 10s }, } - engine := newTestEngine() - engine.enablePerStepStats = true - origMaxSamples := engine.maxSamplesPerQuery for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - opts := NewPrometheusQueryOpts(true, 0) - engine.maxSamplesPerQuery = origMaxSamples + opts := promql.NewPrometheusQueryOpts(true, 0) + engine := promqltest.NewTestEngine(true, 0, promqltest.DefaultMaxSamplesPerQuery) runQuery := func(expErr error) *stats.Statistics { var err error - var qry Query + var qry promql.Query if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, opts, c.Query, c.Start) } else { @@ -1300,14 +1334,14 @@ load 10s if c.SkipMaxCheck { return } - engine.maxSamplesPerQuery = stats.Samples.PeakSamples - 1 - runQuery(ErrTooManySamples(env)) + engine = promqltest.NewTestEngine(true, 0, stats.Samples.PeakSamples-1) + runQuery(promql.ErrTooManySamples(env)) }) } } func TestMaxQuerySamples(t *testing.T) { - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric 1+1x100 bigmetric{a="1"} 1+1x100 @@ -1423,7 +1457,7 @@ load 10s Interval: 5 * time.Second, }, { - // Sample as above but with only 1 part as step invariant. + // Same as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. // Hence at peak it is 2*21 (subquery from 0s to 20s) // + 11 (buffer of a series per evaluation) @@ -1456,7 +1490,7 @@ load 10s engine := newTestEngine() testFunc := func(expError error) { var err error - var qry Query + var qry promql.Query if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) } else { @@ -1474,19 +1508,19 @@ load 10s } // Within limit. - engine.maxSamplesPerQuery = c.MaxSamples + engine = promqltest.NewTestEngine(false, 0, c.MaxSamples) testFunc(nil) // Exceeding limit.
- engine.maxSamplesPerQuery = c.MaxSamples - 1 - testFunc(ErrTooManySamples(env)) + engine = promqltest.NewTestEngine(false, 0, c.MaxSamples-1) + testFunc(promql.ErrTooManySamples(env)) }) } } func TestAtModifier(t *testing.T) { engine := newTestEngine() - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric{job="1"} 0+1x1000 metric{job="2"} 0+2x1000 @@ -1530,137 +1564,137 @@ load 1ms { // Time of the result is the evaluation time. query: `metric_neg @ 0`, start: 100, - result: Vector{ - Sample{F: 1, T: 100000, Metric: lblsneg}, + result: promql.Vector{ + promql.Sample{F: 1, T: 100000, Metric: lblsneg}, }, }, { query: `metric_neg @ -200`, start: 100, - result: Vector{ - Sample{F: 201, T: 100000, Metric: lblsneg}, + result: promql.Vector{ + promql.Sample{F: 201, T: 100000, Metric: lblsneg}, }, }, { query: `metric{job="2"} @ 50`, start: -2, end: 2, interval: 1, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, Metric: lbls2, }, }, }, { // Timestamps for matrix selector does not depend on the evaluation time. query: "metric[20s] @ 300", start: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, - Series{ - Floats: []FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, }, { query: `metric_neg[2s] @ 0`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, }, { query: `metric_neg[3s] @ -500`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, }, { query: `metric_ms[3ms] @ 2.345`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, }, { query: "metric[100s:25s] @ 300", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, Metric: lbls1, }, - Series{ - Floats: []FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, }, { query: "metric_neg[50s:25s] @ 0", start: 100, - result: 
Matrix{ - Series{ - Floats: []FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, }, { query: "metric_neg[50s:25s] @ -100", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, Metric: lblsneg, }, }, }, { query: `metric_ms[100ms:25ms] @ 2.345`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, Metric: lblsms, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ 100))`, start: 50, end: 80, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 80000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 80000}}, Metric: lblstopk3, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ 5000))`, start: 50, end: 80, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, Metric: lblstopk2, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ end()))`, start: 70, end: 100, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, Metric: lblstopk3, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ start()))`, start: 100, end: 130, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, Metric: lblstopk3, }, }, @@ -1669,9 +1703,9 @@ load 1ms // The trick here is that the query range should be > lookback delta. 
query: `timestamp(metric_timestamp @ 3600)`, start: 0, end: 7 * 60, interval: 60, - result: Matrix{ - Series{ - Floats: []FPoint{ + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{ {F: 3600, T: 0}, {F: 3600, T: 60 * 1000}, {F: 3600, T: 2 * 60 * 1000}, @@ -1694,7 +1728,7 @@ load 1ms } start, end, interval := time.Unix(c.start, 0), time.Unix(c.end, 0), time.Duration(c.interval)*time.Second var err error - var qry Query + var qry promql.Query if c.end == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.query, start) } else { @@ -1704,76 +1738,19 @@ load 1ms res := qry.Exec(context.Background()) require.NoError(t, res.Err) - if expMat, ok := c.result.(Matrix); ok { + if expMat, ok := c.result.(promql.Matrix); ok { sort.Sort(expMat) - sort.Sort(res.Value.(Matrix)) + sort.Sort(res.Value.(promql.Matrix)) } testutil.RequireEqual(t, c.result, res.Value, "query %q failed", c.query) }) } } -func TestRecoverEvaluatorRuntime(t *testing.T) { - var output []interface{} - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = append(output, keyvals...) - return nil - })) - ev := &evaluator{logger: logger} - - expr, _ := parser.ParseExpr("sum(up)") - - var err error - - defer func() { - require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") - require.Contains(t, output, "sum(up)") - }() - defer ev.recover(expr, nil, &err) - - // Cause a runtime panic. - var a []int - a[123] = 1 -} - -func TestRecoverEvaluatorError(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} - var err error - - e := errors.New("custom error") - - defer func() { - require.EqualError(t, err, e.Error()) - }() - defer ev.recover(nil, nil, &err) - - panic(e) -} - -func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} - var err error - var ws annotations.Annotations - - warnings := annotations.New().Add(errors.New("custom warning")) - e := errWithWarnings{ - err: errors.New("custom error"), - warnings: warnings, - } - - defer func() { - require.EqualError(t, err, e.Error()) - require.Equal(t, warnings, ws, "wrong warning message") - }() - defer ev.recover(nil, &ws, &err) - - panic(e) -} - func TestSubquerySelector(t *testing.T) { type caseType struct { Query string - Result Result + Result promql.Result Start time.Time } @@ -1787,11 +1764,11 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { Query: "metric[20s:10s]", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1801,11 +1778,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s]", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1815,11 +1792,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 2s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: 
labels.FromStrings("__name__", "metric"), }, }, @@ -1829,11 +1806,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 6s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1843,11 +1820,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 4s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1857,11 +1834,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 5s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1871,11 +1848,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 6s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1885,11 +1862,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 7s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1908,11 +1885,11 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { // Normal selector. Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1922,11 +1899,11 @@ func TestSubquerySelector(t *testing.T) { }, { // Default step. 
Query: `http_requests{group=~"pro.*",instance="0"}[5m:]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1936,11 +1913,11 @@ func TestSubquerySelector(t *testing.T) { }, { // Checking if high offset (>LookbackDelta) is being taken care of. Query: `http_requests{group=~"pro.*",instance="0"}[5m:] offset 20m`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1950,23 +1927,23 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `rate(http_requests[1m])[15s:5s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), }, - Series{ - Floats: []FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), }, - Series{ - Floats: []FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), }, - Series{ - Floats: []FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), }, }, @@ -1976,11 +1953,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1990,11 +1967,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests)[40s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, + promql.Matrix{ 
+ promql.Series{ + Floats: []promql.FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -2004,11 +1981,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -2021,7 +1998,7 @@ func TestSubquerySelector(t *testing.T) { } { t.Run("", func(t *testing.T) { engine := newTestEngine() - storage := LoadedStorage(t, tst.loadString) + storage := promqltest.LoadedStorage(t, tst.loadString) t.Cleanup(func() { storage.Close() }) for _, c := range tst.cases { @@ -2031,7 +2008,7 @@ func TestSubquerySelector(t *testing.T) { res := qry.Exec(context.Background()) require.Equal(t, c.Result.Err, res.Err) - mat := res.Value.(Matrix) + mat := res.Value.(promql.Matrix) sort.Sort(mat) testutil.RequireEqual(t, c.Result.Value, mat) }) @@ -2040,47 +2017,6 @@ func TestSubquerySelector(t *testing.T) { } } -func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) { - engine := newTestEngine() - storage := LoadedStorage(t, ` -load 1m - metric 0+1x1000 -`) - t.Cleanup(func() { storage.Close() }) - - query := "timestamp(metric)" - start := time.Unix(0, 0) - end := time.Unix(61, 0) - interval := time.Second - - // We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. - expectedPoints := []FPoint{} - - for t := 0; t <= 59; t++ { - expectedPoints = append(expectedPoints, FPoint{F: 0, T: int64(t * 1000)}) - } - - expectedPoints = append( - expectedPoints, - FPoint{F: 60, T: 60_000}, - FPoint{F: 60, T: 61_000}, - ) - - expectedResult := Matrix{ - Series{ - Floats: expectedPoints, - Metric: labels.EmptyLabels(), - }, - } - - qry, err := engine.NewRangeQuery(context.Background(), storage, nil, query, start, end, interval) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - testutil.RequireEqual(t, expectedResult, res.Value) -} - type FakeQueryLogger struct { closed bool logs []interface{} @@ -2104,25 +2040,25 @@ func (f *FakeQueryLogger) Log(l ...interface{}) error { } func TestQueryLogger_basic(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) queryExec := func() { ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) res := query.Exec(ctx) require.NoError(t, res.Err) } - // Query works without query log initialized. + // promql.Query works without query log initialized. 
queryExec() f1 := NewFakeQueryLogger() @@ -2155,21 +2091,21 @@ func TestQueryLogger_basic(t *testing.T) { } func TestQueryLogger_fields(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) @@ -2184,22 +2120,22 @@ func TestQueryLogger_fields(t *testing.T) { } func TestQueryLogger_error(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() testErr := errors.New("failure") - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return testErr }) @@ -3005,7 +2941,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { t.Run(test.input, func(t *testing.T) { expr, err := parser.ParseExpr(test.input) require.NoError(t, err) - expr = PreprocessExpr(expr, startTime, endTime) + expr = promql.PreprocessExpr(expr, startTime, endTime) if test.outputTest { require.Equal(t, test.input, expr.String(), "error on input '%s'", test.input) } @@ -3016,64 +2952,64 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { func TestEngineOptsValidation(t *testing.T) { cases := []struct { - opts EngineOpts + opts promql.EngineOpts query string fail bool expError error }{ { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ 100", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ 100", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ 100)", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ 100)", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ 100)", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ 100)", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ start()", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ start()", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ start())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ start())", fail: true, 
expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ start())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ start())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ end()", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ end()", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ end())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ end())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ end())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ end())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "metric @ 100", }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "rate(metric[1m] @ start())", }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "rate(metric[1h:1m] @ end())", }, { - opts: EngineOpts{EnableNegativeOffset: false}, - query: "metric offset -1s", fail: true, expError: ErrValidationNegativeOffsetDisabled, + opts: promql.EngineOpts{EnableNegativeOffset: false}, + query: "metric offset -1s", fail: true, expError: promql.ErrValidationNegativeOffsetDisabled, }, { - opts: EngineOpts{EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableNegativeOffset: true}, query: "metric offset -1s", }, { - opts: EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, query: "metric @ 100 offset -2m", }, { - opts: EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, query: "metric offset -2m @ 100", }, } for _, c := range cases { - eng := NewEngine(c.opts) + eng := promql.NewEngine(c.opts) _, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0)) _, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { @@ -3086,1237 +3022,82 @@ func TestEngineOptsValidation(t *testing.T) { } } -func TestRangeQuery(t *testing.T) { - cases := []struct { - Name string - Load string - Query string - Result parser.Value - Start time.Time - End time.Time - Interval time.Duration +func TestInstantQueryWithRangeVectorSelector(t *testing.T) { + engine := newTestEngine() + + baseT := timestamp.Time(0) + storage := promqltest.LoadedStorage(t, ` + load 1m + some_metric{env="1"} 0+1x4 + some_metric{env="2"} 0+2x4 + some_metric_with_stale_marker 0 1 stale 3 + `) + t.Cleanup(func() { require.NoError(t, storage.Close()) }) + + testCases := map[string]struct { + expr string + expected promql.Matrix + ts time.Time }{ - { - Name: "sum_over_time with all values", - Load: `load 30s - bar 0 1 10 100 1000`, - Query: 
"sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, - Metric: labels.EmptyLabels(), + "matches series with points in range": { + expr: "some_metric[1m]", + ts: baseT.Add(2 * time.Minute), + expected: promql.Matrix{ + { + Metric: labels.FromStrings("__name__", "some_metric", "env", "1"), + Floats: []promql.FPoint{ + {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, + {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 2}, + }, + }, + { + Metric: labels.FromStrings("__name__", "some_metric", "env", "2"), + Floats: []promql.FPoint{ + {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 2}, + {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4}, + }, }, }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 60 * time.Second, }, - { - Name: "sum_over_time with trailing values", - Load: `load 30s - bar 0 1 10 100 1000 0 0 0 0`, - Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 60 * time.Second, + "matches no series": { + expr: "some_nonexistent_metric[1m]", + ts: baseT, + expected: promql.Matrix{}, }, - { - Name: "sum_over_time with all values long", - Load: `load 30s - bar 0 1 10 100 1000 10000 100000 1000000 10000000`, - Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, - Metric: labels.EmptyLabels(), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(240, 0), - Interval: 60 * time.Second, + "no samples in range": { + expr: "some_metric[1m]", + ts: baseT.Add(20 * time.Minute), + expected: promql.Matrix{}, }, - { - Name: "sum_over_time with all values random", - Load: `load 30s - bar 5 17 42 2 7 905 51`, - Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, - Metric: labels.EmptyLabels(), + "metric with stale marker": { + expr: "some_metric_with_stale_marker[3m]", + ts: baseT.Add(3 * time.Minute), + expected: promql.Matrix{ + { + Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"), + Floats: []promql.FPoint{ + {T: timestamp.FromTime(baseT), F: 0}, + {T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1}, + {T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3}, + }, }, }, - Start: time.Unix(0, 0), - End: time.Unix(180, 0), - Interval: 60 * time.Second, - }, - { - Name: "metric query", - Load: `load 30s - metric 1+1x4`, - Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings("__name__", "metric"), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - { - Name: "metric query with trailing values", - Load: `load 30s - metric 1+1x8`, - Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings("__name__", "metric"), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - { - Name: "short-circuit", - Load: `load 30s - foo{job="1"} 1+1x4 - bar{job="2"} 1+1x4`, - Query: `foo > 2 or bar`, - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: 
labels.FromStrings( - "__name__", "bar", - "job", "2", - ), - }, - Series{ - Floats: []FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}}, - Metric: labels.FromStrings( - "__name__", "foo", - "job", "1", - ), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, - }, - { - Name: "drop-metric-name", - Load: `load 30s - requests{job="1", __address__="bar"} 100`, - Query: `requests * 2`, - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 200, T: 0}, {F: 200, T: 60000}, {F: 200, T: 120000}}, - Metric: labels.FromStrings( - "__address__", "bar", - "job", "1", - ), - }, - }, - Start: time.Unix(0, 0), - End: time.Unix(120, 0), - Interval: 1 * time.Minute, }, } - for _, c := range cases { - t.Run(c.Name, func(t *testing.T) { - engine := newTestEngine() - storage := LoadedStorage(t, c.Load) - t.Cleanup(func() { storage.Close() }) - qry, err := engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval) + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + q, err := engine.NewInstantQuery(context.Background(), storage, nil, testCase.expr, testCase.ts) require.NoError(t, err) + defer q.Close() - res := qry.Exec(context.Background()) + res := q.Exec(context.Background()) require.NoError(t, res.Err) - testutil.RequireEqual(t, c.Result, res.Value) + testutil.RequireEqual(t, testCase.expected, res.Value) }) } } -func TestNativeHistogramRate(t *testing.T) { - // TODO(beorn7): Integrate histograms into the PromQL testing framework - // and write more tests there. - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - app := storage.Appender(context.Background()) - for i, h := range tsdbutil.GenerateTestHistograms(100) { - _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil) - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("rate(%s[45s])", seriesName) - t.Run("instant_query", func(t *testing.T) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) - require.NoError(t, err) - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - actualHistogram := vector[0].H - expectedHistogram := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.2266666666666663, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. 
/ 15.}, - } - require.Equal(t, expectedHistogram, actualHistogram) - }) - - t.Run("range_query", func(t *testing.T) { - step := 30 * time.Second - start := timestamp.Time(int64(5 * time.Minute / time.Millisecond)) - end := start.Add(step) - qry, err := engine.NewRangeQuery(context.Background(), storage, nil, queryString, start, end, step) - require.NoError(t, err) - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - matrix, err := res.Matrix() - require.NoError(t, err) - require.Len(t, matrix, 1) - require.Len(t, matrix[0].Histograms, 2) - actualHistograms := matrix[0].Histograms - expectedHistograms := []HPoint{{ - T: 300000, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.2266666666666663, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - }, - }, { - T: 330000, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.2266666666666663, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - }, - }} - require.Equal(t, expectedHistograms, actualHistograms) - }) -} - -func TestNativeFloatHistogramRate(t *testing.T) { - // TODO(beorn7): Integrate histograms into the PromQL testing framework - // and write more tests there. - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - app := storage.Appender(context.Background()) - for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) { - _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh) - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) - require.NoError(t, err) - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - actualHistogram := vector[0].H - expectedHistogram := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.226666666666667, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - } - require.Equal(t, expectedHistogram, actualHistogram) -} - -func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- h := &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - } - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - ts := int64(10 * time.Minute / time.Millisecond) - app := storage.Appender(context.Background()) - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("histogram_count(%s)", seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if floatHisto { - require.Equal(t, h.ToFloat(nil).Count, vector[0].F) - } else { - require.Equal(t, float64(h.Count), vector[0].F) - } - - queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) - qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res = qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err = res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if floatHisto { - require.Equal(t, h.ToFloat(nil).Sum, vector[0].F) - } else { - require.Equal(t, h.Sum, vector[0].F) - } - }) - } -} - -func TestNativeHistogram_HistogramStdDevVar(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- testCases := []struct { - name string - h *histogram.Histogram - stdVar float64 - }{ - { - name: "1, 2, 3, 4 low-res", - h: &histogram.Histogram{ - Count: 4, - Sum: 10, - Schema: 2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 1}, - {Offset: 3, Length: 1}, - {Offset: 2, Length: 2}, - }, - PositiveBuckets: []int64{1, 0, 0, 0}, - }, - stdVar: 1.163807968526718, // actual variance: 1.25 - }, - { - name: "1, 2, 3, 4 hi-res", - h: &histogram.Histogram{ - Count: 4, - Sum: 10, - Schema: 8, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 1}, - {Offset: 255, Length: 1}, - {Offset: 149, Length: 1}, - {Offset: 105, Length: 1}, - }, - PositiveBuckets: []int64{1, 0, 0, 0}, - }, - stdVar: 1.2471347737158793, // actual variance: 1.25 - }, - { - name: "-50, -8, 0, 3, 8, 9, 100", - h: &histogram.Histogram{ - Count: 7, - ZeroCount: 1, - Sum: 62, - Schema: 3, - PositiveSpans: []histogram.Span{ - {Offset: 13, Length: 1}, - {Offset: 10, Length: 1}, - {Offset: 1, Length: 1}, - {Offset: 27, Length: 1}, - }, - PositiveBuckets: []int64{1, 0, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 24, Length: 1}, - {Offset: 21, Length: 1}, - }, - NegativeBuckets: []int64{1, 0}, - }, - stdVar: 1844.4651144196398, // actual variance: 1738.4082 - }, - { - name: "-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3", - h: &histogram.Histogram{ - Count: 10, - ZeroCount: 0, - Sum: -112946, - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 3}, - {Offset: 1, Length: 2}, - {Offset: 2, Length: 1}, - {Offset: 3, Length: 1}, - {Offset: 2, Length: 1}, - }, - NegativeBuckets: []int64{1, 0, 0, 0, 0, 2, -2, 0}, - }, - stdVar: 759352122.1939945, // actual variance: 882690990 - }, - { - name: "-10 x10", - h: &histogram.Histogram{ - Count: 10, - ZeroCount: 0, - Sum: -100, - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 4, Length: 1}, - }, - NegativeBuckets: []int64{10}, - }, - stdVar: 1.725830020304794, // actual variance: 0 - }, - { - name: "-50, -8, 0, 3, 8, 9, 100, NaN", - h: &histogram.Histogram{ - Count: 8, - ZeroCount: 1, - Sum: math.NaN(), - Schema: 3, - PositiveSpans: []histogram.Span{ - {Offset: 13, Length: 1}, - {Offset: 10, Length: 1}, - {Offset: 1, Length: 1}, - {Offset: 27, Length: 1}, - }, - PositiveBuckets: []int64{1, 0, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 24, Length: 1}, - {Offset: 21, Length: 1}, - }, - NegativeBuckets: []int64{1, 0}, - }, - stdVar: math.NaN(), - }, - { - name: "-50, -8, 0, 3, 8, 9, 100, +Inf", - h: &histogram.Histogram{ - Count: 7, - ZeroCount: 1, - Sum: math.Inf(1), - Schema: 3, - PositiveSpans: []histogram.Span{ - {Offset: 13, Length: 1}, - {Offset: 10, Length: 1}, - {Offset: 1, Length: 1}, - {Offset: 27, Length: 1}, - }, - PositiveBuckets: []int64{1, 0, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 24, Length: 1}, - {Offset: 21, Length: 1}, - }, - NegativeBuckets: []int64{1, 0}, - }, - stdVar: math.NaN(), - }, - } - for _, tc := range testCases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("%s floatHistogram=%t", tc.name, floatHisto), func(t *testing.T) { - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - ts := int64(10 * time.Minute / time.Millisecond) - app := storage.Appender(context.Background()) - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, 
lbls, ts, tc.h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("histogram_stdvar(%s)", seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - require.InEpsilon(t, tc.stdVar, vector[0].F, 1e-12) - - queryString = fmt.Sprintf("histogram_stddev(%s)", seriesName) - qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res = qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err = res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - require.InEpsilon(t, math.Sqrt(tc.stdVar), vector[0].F, 1e-12) - }) - } - } -} - -func TestNativeHistogram_HistogramQuantile(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - type subCase struct { - quantile string - value float64 - } - - cases := []struct { - text string - // Histogram to test. - h *histogram.Histogram - // Different quantiles to test for this histogram. - subCases []subCase - }{ - { - text: "all positive buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - }, - subCases: []subCase{ - { - quantile: "1.0001", - value: math.Inf(1), - }, - { - quantile: "1", - value: 16, - }, - { - quantile: "0.99", - value: 15.759999999999998, - }, - { - quantile: "0.9", - value: 13.600000000000001, - }, - { - quantile: "0.6", - value: 4.799999999999997, - }, - { - quantile: "0.5", - value: 1.6666666666666665, - }, - { // Zero bucket. - quantile: "0.1", - value: 0.0006000000000000001, - }, - { - quantile: "0", - value: 0, - }, - { - quantile: "-1", - value: math.Inf(-1), - }, - }, - }, - { - text: "all negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: []subCase{ - { - quantile: "1.0001", - value: math.Inf(1), - }, - { // Zero bucket. - quantile: "1", - value: 0, - }, - { // Zero bucket. - quantile: "0.99", - value: -6.000000000000048e-05, - }, - { // Zero bucket. - quantile: "0.9", - value: -0.0005999999999999996, - }, - { - quantile: "0.5", - value: -1.6666666666666667, - }, - { - quantile: "0.1", - value: -13.6, - }, - { - quantile: "0", - value: -16, - }, - { - quantile: "-1", - value: math.Inf(-1), - }, - }, - }, - { - text: "both positive and negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. 
- Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: []subCase{ - { - quantile: "1.0001", - value: math.Inf(1), - }, - { - quantile: "1", - value: 16, - }, - { - quantile: "0.99", - value: 15.519999999999996, - }, - { - quantile: "0.9", - value: 11.200000000000003, - }, - { - quantile: "0.7", - value: 1.2666666666666657, - }, - { // Zero bucket. - quantile: "0.55", - value: 0.0006000000000000005, - }, - { // Zero bucket. - quantile: "0.5", - value: 0, - }, - { // Zero bucket. - quantile: "0.45", - value: -0.0005999999999999996, - }, - { - quantile: "0.3", - value: -1.266666666666667, - }, - { - quantile: "0.1", - value: -11.2, - }, - { - quantile: "0.01", - value: -15.52, - }, - { - quantile: "0", - value: -16, - }, - { - quantile: "-1", - value: math.Inf(-1), - }, - }, - }, - } - - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - idx := int64(0) - for _, floatHisto := range []bool{true, false} { - for _, c := range cases { - t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - ts := idx * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - for j, sc := range c.subCases { - t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) { - queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - require.True(t, almostEqual(sc.value, vector[0].F, defaultEpsilon)) - }) - } - idx++ - }) - } - } -} - -func TestNativeHistogram_HistogramFraction(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - type subCase struct { - lower, upper string - value float64 - } - - invariantCases := []subCase{ - { - lower: "42", - upper: "3.1415", - value: 0, - }, - { - lower: "0", - upper: "0", - value: 0, - }, - { - lower: "0.000001", - upper: "0.000001", - value: 0, - }, - { - lower: "42", - upper: "42", - value: 0, - }, - { - lower: "-3.1", - upper: "-3.1", - value: 0, - }, - { - lower: "3.1415", - upper: "NaN", - value: math.NaN(), - }, - { - lower: "NaN", - upper: "42", - value: math.NaN(), - }, - { - lower: "NaN", - upper: "NaN", - value: math.NaN(), - }, - { - lower: "-Inf", - upper: "+Inf", - value: 1, - }, - } - - cases := []struct { - text string - // Histogram to test. - h *histogram.Histogram - // Different ranges to test for this histogram. 
- subCases []subCase - }{ - { - text: "empty histogram", - h: &histogram.Histogram{}, - subCases: []subCase{ - { - lower: "3.1415", - upper: "42", - value: math.NaN(), - }, - }, - }, - { - text: "all positive buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 - }, - subCases: append([]subCase{ - { - lower: "0", - upper: "+Inf", - value: 1, - }, - { - lower: "-Inf", - upper: "0", - value: 0, - }, - { - lower: "-0.001", - upper: "0", - value: 0, - }, - { - lower: "0", - upper: "0.001", - value: 2. / 12., - }, - { - lower: "0", - upper: "0.0005", - value: 1. / 12., - }, - { - lower: "0.001", - upper: "inf", - value: 10. / 12., - }, - { - lower: "-inf", - upper: "-0.001", - value: 0, - }, - { - lower: "1", - upper: "2", - value: 3. / 12., - }, - { - lower: "1.5", - upper: "2", - value: 1.5 / 12., - }, - { - lower: "1", - upper: "8", - value: 4. / 12., - }, - { - lower: "1", - upper: "6", - value: 3.5 / 12., - }, - { - lower: "1.5", - upper: "6", - value: 2. / 12., - }, - { - lower: "-2", - upper: "-1", - value: 0, - }, - { - lower: "-2", - upper: "-1.5", - value: 0, - }, - { - lower: "-8", - upper: "-1", - value: 0, - }, - { - lower: "-6", - upper: "-1", - value: 0, - }, - { - lower: "-6", - upper: "-1.5", - value: 0, - }, - }, invariantCases...), - }, - { - text: "all negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 12, - ZeroCount: 2, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: append([]subCase{ - { - lower: "0", - upper: "+Inf", - value: 0, - }, - { - lower: "-Inf", - upper: "0", - value: 1, - }, - { - lower: "-0.001", - upper: "0", - value: 2. / 12., - }, - { - lower: "0", - upper: "0.001", - value: 0, - }, - { - lower: "-0.0005", - upper: "0", - value: 1. / 12., - }, - { - lower: "0.001", - upper: "inf", - value: 0, - }, - { - lower: "-inf", - upper: "-0.001", - value: 10. / 12., - }, - { - lower: "1", - upper: "2", - value: 0, - }, - { - lower: "1.5", - upper: "2", - value: 0, - }, - { - lower: "1", - upper: "8", - value: 0, - }, - { - lower: "1", - upper: "6", - value: 0, - }, - { - lower: "1.5", - upper: "6", - value: 0, - }, - { - lower: "-2", - upper: "-1", - value: 3. / 12., - }, - { - lower: "-2", - upper: "-1.5", - value: 1.5 / 12., - }, - { - lower: "-8", - upper: "-1", - value: 4. / 12., - }, - { - lower: "-6", - upper: "-1", - value: 3.5 / 12., - }, - { - lower: "-6", - upper: "-1.5", - value: 2. / 12., - }, - }, invariantCases...), - }, - { - text: "both positive and negative buckets with zero bucket", - h: &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, // Does not matter. - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - }, - subCases: append([]subCase{ - { - lower: "0", - upper: "+Inf", - value: 0.5, - }, - { - lower: "-Inf", - upper: "0", - value: 0.5, - }, - { - lower: "-0.001", - upper: "0", - value: 2. / 24, - }, - { - lower: "0", - upper: "0.001", - value: 2. 
/ 24., - }, - { - lower: "-0.0005", - upper: "0.0005", - value: 2. / 24., - }, - { - lower: "0.001", - upper: "inf", - value: 10. / 24., - }, - { - lower: "-inf", - upper: "-0.001", - value: 10. / 24., - }, - { - lower: "1", - upper: "2", - value: 3. / 24., - }, - { - lower: "1.5", - upper: "2", - value: 1.5 / 24., - }, - { - lower: "1", - upper: "8", - value: 4. / 24., - }, - { - lower: "1", - upper: "6", - value: 3.5 / 24., - }, - { - lower: "1.5", - upper: "6", - value: 2. / 24., - }, - { - lower: "-2", - upper: "-1", - value: 3. / 24., - }, - { - lower: "-2", - upper: "-1.5", - value: 1.5 / 24., - }, - { - lower: "-8", - upper: "-1", - value: 4. / 24., - }, - { - lower: "-6", - upper: "-1", - value: 3.5 / 24., - }, - { - lower: "-6", - upper: "-1.5", - value: 2. / 24., - }, - }, invariantCases...), - }, - } - idx := int64(0) - for _, floatHisto := range []bool{true, false} { - for _, c := range cases { - t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - ts := idx * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - for j, sc := range c.subCases { - t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) { - queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if math.IsNaN(sc.value) { - require.True(t, math.IsNaN(vector[0].F)) - return - } - require.Equal(t, sc.value, vector[0].F) - }) - } - idx++ - }) - } - } -} - func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. @@ -4444,7 +3225,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42) require.NoError(t, err) for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) + lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) // Since we mutate h later, we need to create a copy here. var err error if floatHisto { @@ -4466,7 +3247,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { } require.NoError(t, app.Commit()) - queryAndCheck := func(queryString string, ts int64, exp Vector) { + queryAndCheck := func(queryString string, ts int64, exp promql.Vector) { qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) @@ -4490,7 +3271,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { // sum(). 
queryString := fmt.Sprintf("sum(%s)", seriesName) - queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) queryString = `sum({idx="0"})` var annos annotations.Annotations @@ -4502,26 +3283,26 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { for idx := 1; idx < len(c.histograms); idx++ { queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) } - queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) // count(). queryString = fmt.Sprintf("count(%s)", seriesName) - queryAndCheck(queryString, ts, []Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) // avg(). queryString = fmt.Sprintf("avg(%s)", seriesName) - queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) offset := int64(len(c.histograms) - 1) newTs := ts + offset*int64(time.Minute/time.Millisecond) // sum_over_time(). queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) // avg_over_time(). queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) }) idx0++ } @@ -4716,7 +3497,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { ts := idx0 * int64(10*time.Minute/time.Millisecond) app := storage.Appender(context.Background()) for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) + lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) // Since we mutate h later, we need to create a copy here. 
var err error if floatHisto { @@ -4728,7 +3509,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { } require.NoError(t, app.Commit()) - queryAndCheck := func(queryString string, exp Vector) { + queryAndCheck := func(queryString string, exp promql.Vector) { qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) @@ -4756,7 +3537,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { for idx := 1; idx < len(c.histograms); idx++ { queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx) } - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) }) } idx0++ @@ -4886,7 +3667,7 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) { require.NoError(t, err) require.NoError(t, app.Commit()) - queryAndCheck := func(queryString string, exp Vector) { + queryAndCheck := func(queryString string, exp promql.Vector) { qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) require.NoError(t, err) @@ -4901,27 +3682,27 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) { // histogram * scalar. queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) // scalar * histogram. queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) // histogram * float. queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) // float * histogram. queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) // histogram / scalar. queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar) - queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) + queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) // histogram / float. 
 			queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
 		})
 		idx0++
 	}
@@ -4991,20 +3772,17 @@ metric 0 1 2
 	for _, c := range cases {
 		c := c
 		t.Run(c.name, func(t *testing.T) {
-			engine := newTestEngine()
-			storage := LoadedStorage(t, load)
+			engine := promqltest.NewTestEngine(false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
+			storage := promqltest.LoadedStorage(t, load)
 			t.Cleanup(func() { storage.Close() })
 
-			if c.engineLookback != 0 {
-				engine.lookbackDelta = c.engineLookback
-			}
-			opts := NewPrometheusQueryOpts(false, c.queryLookback)
+			opts := promql.NewPrometheusQueryOpts(false, c.queryLookback)
 			qry, err := engine.NewInstantQuery(context.Background(), storage, opts, query, c.ts)
 			require.NoError(t, err)
 
 			res := qry.Exec(context.Background())
 			require.NoError(t, res.Err)
-			vec, ok := res.Value.(Vector)
+			vec, ok := res.Value.(promql.Vector)
 			require.True(t, ok)
 			if c.expectSamples {
 				require.NotEmpty(t, vec)
@@ -5014,3 +3792,9 @@ metric 0 1 2
 		})
 	}
 }
+
+func makeInt64Pointer(val int64) *int64 {
+	valp := new(int64)
+	*valp = val
+	return valp
+}
diff --git a/promql/functions.go b/promql/functions.go
index 2e15a14672..dcc2cd7590 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -14,6 +14,7 @@
 package promql
 
 import (
+	"errors"
 	"fmt"
 	"math"
 	"slices"
@@ -210,14 +211,28 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
 	}
 	h := last.CopyToSchema(minSchema)
-	h.Sub(prev)
+	_, err := h.Sub(prev)
+	if err != nil {
+		if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+			return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+		} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+			return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+		}
+	}
 
 	if isCounter {
 		// Second iteration to deal with counter resets.
 		for _, currPoint := range points[1:] {
 			curr := currPoint.H
 			if curr.DetectReset(prev) {
-				h.Add(prev)
+				_, err := h.Add(prev)
+				if err != nil {
+					if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+						return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+					} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+						return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+					}
+				}
 			}
 			prev = curr
 		}
@@ -513,10 +528,11 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series)
 	return append(enh.Out, Sample{F: aggrFn(el)})
 }
 
-func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector {
+func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
 	el := vals[0].(Matrix)[0]
+	res, err := aggrFn(el)
 
-	return append(enh.Out, Sample{H: aggrFn(el)})
+	return append(enh.Out, Sample{H: res}), err
 }
 
 // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@@ -528,18 +544,33 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 	}
 	if len(firstSeries.Floats) == 0 {
 		// The passed values only contain histograms.
-		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+		vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
 			count := 1
 			mean := s.Histograms[0].H.Copy()
 			for _, h := range s.Histograms[1:] {
 				count++
 				left := h.H.Copy().Div(float64(count))
 				right := mean.Copy().Div(float64(count))
-				toAdd := left.Sub(right)
-				mean.Add(toAdd)
+				toAdd, err := left.Sub(right)
+				if err != nil {
+					return mean, err
+				}
+				_, err = mean.Add(toAdd)
+				if err != nil {
+					return mean, err
+				}
 			}
-			return mean
-		}), nil
+			return mean, nil
+		})
+		if err != nil {
+			metricName := firstSeries.Metric.Get(labels.MetricName)
+			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+			} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+				return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
+			}
+		}
+		return vec, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var mean, count, c float64
@@ -673,13 +704,25 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 	}
 	if len(firstSeries.Floats) == 0 {
 		// The passed values only contain histograms.
-		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+		vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
 			sum := s.Histograms[0].H.Copy()
 			for _, h := range s.Histograms[1:] {
-				sum.Add(h.H)
+				_, err := sum.Add(h.H)
+				if err != nil {
+					return sum, err
+				}
 			}
-			return sum
-		}), nil
+			return sum, nil
+		})
+		if err != nil {
+			metricName := firstSeries.Metric.Get(labels.MetricName)
+			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+			} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+				return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
+			}
+		}
+		return vec, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var sum, c float64
@@ -948,21 +991,16 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
 	return enh.Out, nil
 }
 
-func kahanSum(samples []float64) float64 {
-	var sum, c float64
-
-	for _, v := range samples {
-		sum, c = kahanSumInc(v, sum, c)
-	}
-	return sum + c
-}
-
 func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
 	t := sum + inc
+	switch {
+	case math.IsInf(t, 0):
+		c = 0
+
 	// Using Neumaier improvement, swap if next term larger than sum.
-	if math.Abs(sum) >= math.Abs(inc) {
+	case math.Abs(sum) >= math.Abs(inc):
 		c += (sum - t) + inc
-	} else {
+	default:
 		c += (inc - t) + sum
 	}
 	return t, c
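Note on the kahanSumInc change above: the new switch keeps the Neumaier operand swap and additionally zeroes the compensation term once the running sum overflows to infinity, so Inf and NaN propagate cleanly instead of being distorted by a leftover finite correction. A minimal standalone sketch of the same technique, for illustration only (neumaierSumInc and the sample values are not part of this change; the loop values are the ones from the removed TestKahanSum):

package main

import (
	"fmt"
	"math"
)

// neumaierSumInc mirrors the patched kahanSumInc: add inc to sum while
// carrying a compensation term c for the low-order bits lost to rounding.
func neumaierSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0 // Once the sum overflows, compensation is meaningless.
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc // Low-order bits of inc were lost.
	default:
		c += (inc - t) + sum // Low-order bits of sum were lost.
	}
	return t, c
}

func main() {
	// Naive summation returns 0 here: both 1.0 terms are rounded away
	// next to 1e100. The compensated sum recovers them.
	vals := []float64{1.0, 1e100, 1.0, -1e100}
	var naive, sum, c float64
	for _, v := range vals {
		naive += v
		sum, c = neumaierSumInc(v, sum, c)
	}
	fmt.Println(naive)   // 0
	fmt.Println(sum + c) // 2

	// The new IsInf case: an overflowing sum yields a clean +Inf with a
	// zeroed compensation term.
	sum, c = neumaierSumInc(math.Inf(1), 1e308, 0)
	fmt.Println(sum, c) // +Inf 0
}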
diff --git a/promql/functions_internal_test.go b/promql/functions_internal_test.go
new file mode 100644
index 0000000000..85c6cd7973
--- /dev/null
+++ b/promql/functions_internal_test.go
@@ -0,0 +1,81 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"fmt"
+	"math"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestKahanSumInc(t *testing.T) {
+	testCases := map[string]struct {
+		first    float64
+		second   float64
+		expected float64
+	}{
+		"+Inf + anything = +Inf": {
+			first:    math.Inf(1),
+			second:   2.0,
+			expected: math.Inf(1),
+		},
+		"-Inf + anything = -Inf": {
+			first:    math.Inf(-1),
+			second:   2.0,
+			expected: math.Inf(-1),
+		},
+		"+Inf + -Inf = NaN": {
+			first:    math.Inf(1),
+			second:   math.Inf(-1),
+			expected: math.NaN(),
+		},
+		"NaN + anything = NaN": {
+			first:    math.NaN(),
+			second:   2,
+			expected: math.NaN(),
+		},
+		"NaN + Inf = NaN": {
+			first:    math.NaN(),
+			second:   math.Inf(1),
+			expected: math.NaN(),
+		},
+		"NaN + -Inf = NaN": {
+			first:    math.NaN(),
+			second:   math.Inf(-1),
+			expected: math.NaN(),
+		},
+	}
+
+	runTest := func(t *testing.T, a, b, expected float64) {
+		t.Run(fmt.Sprintf("%v + %v = %v", a, b, expected), func(t *testing.T) {
+			sum, c := kahanSumInc(b, a, 0)
+			result := sum + c
+
+			if math.IsNaN(expected) {
+				require.Truef(t, math.IsNaN(result), "expected result to be NaN, but got %v (from %v + %v)", result, sum, c)
+			} else {
+				require.Equalf(t, expected, result, "expected result to be %v, but got %v (from %v + %v)", expected, result, sum, c)
+			}
+		})
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			runTest(t, testCase.first, testCase.second, testCase.expected)
+			runTest(t, testCase.second, testCase.first, testCase.expected)
+		})
+	}
+}
diff --git a/promql/functions_test.go b/promql/functions_test.go
index 6d5c3784ea..aef59c8379 100644
--- a/promql/functions_test.go
+++ b/promql/functions_test.go
@@ -11,11 +11,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package promql
+package promql_test
 
 import (
 	"context"
-	"math"
 	"testing"
 	"time"
 
@@ -23,6 +22,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/util/teststorage"
 )
@@ -33,13 +33,13 @@ func TestDeriv(t *testing.T) {
 	// so we test it by hand.
 	storage := teststorage.New(t)
 	defer storage.Close()
-	opts := EngineOpts{
+	opts := promql.EngineOpts{
 		Logger:     nil,
 		Reg:        nil,
 		MaxSamples: 10000,
 		Timeout:    10 * time.Second,
 	}
-	engine := NewEngine(opts)
+	engine := promql.NewEngine(opts)
 
 	a := storage.Appender(context.Background())
 
@@ -70,19 +70,13 @@ func TestDeriv(t *testing.T) {
 
 func TestFunctionList(t *testing.T) {
 	// Test that Functions and parser.Functions list the same functions.
-	for i := range FunctionCalls {
+	for i := range promql.FunctionCalls {
 		_, ok := parser.Functions[i]
 		require.True(t, ok, "function %s exists in promql package, but not in parser package", i)
 	}
 	for i := range parser.Functions {
-		_, ok := FunctionCalls[i]
+		_, ok := promql.FunctionCalls[i]
 		require.True(t, ok, "function %s exists in parser package, but not in promql package", i)
 	}
 }
-
-func TestKahanSum(t *testing.T) {
-	vals := []float64{1.0, math.Pow(10, 100), 1.0, -1 * math.Pow(10, 100)}
-	expected := 2.0
-	require.Equal(t, expected, kahanSum(vals))
-}
diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go
new file mode 100644
index 0000000000..dfafea5f8c
--- /dev/null
+++ b/promql/histogram_stats_iterator.go
@@ -0,0 +1,144 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/value"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+)
+
+type histogramStatsIterator struct {
+	chunkenc.Iterator
+
+	currentH *histogram.Histogram
+	lastH    *histogram.Histogram
+
+	currentFH *histogram.FloatHistogram
+	lastFH    *histogram.FloatHistogram
+}
+
+// NewHistogramStatsIterator creates an iterator which returns histogram objects
+// which have only their sum and count values populated. The iterator handles
+// counter reset detection internally and sets the counter reset hint accordingly
+// in each returned histogram object.
+func NewHistogramStatsIterator(it chunkenc.Iterator) chunkenc.Iterator {
+	return &histogramStatsIterator{
+		Iterator:  it,
+		currentH:  &histogram.Histogram{},
+		currentFH: &histogram.FloatHistogram{},
+	}
+}
+
+// AtHistogram returns the next timestamp/histogram pair. The counter reset
+// detection is guaranteed to be correct only when the caller does not switch
+// between AtHistogram and AtFloatHistogram calls.
+func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
+	var t int64
+	t, f.currentH = f.Iterator.AtHistogram(f.currentH)
+	if value.IsStaleNaN(f.currentH.Sum) {
+		f.setLastH(f.currentH)
+		h = &histogram.Histogram{Sum: f.currentH.Sum}
+		return t, h
+	}
+
+	if h == nil {
+		h = &histogram.Histogram{
+			CounterResetHint: f.getResetHint(f.currentH),
+			Count:            f.currentH.Count,
+			Sum:              f.currentH.Sum,
+		}
+		f.setLastH(f.currentH)
+		return t, h
+	}
+
+	h.CounterResetHint = f.getResetHint(f.currentH)
+	h.Count = f.currentH.Count
+	h.Sum = f.currentH.Sum
+	f.setLastH(f.currentH)
+	return t, h
+}
+
+// AtFloatHistogram returns the next timestamp/float histogram pair. The counter
+// reset detection is guaranteed to be correct only when the caller does not
+// switch between AtHistogram and AtFloatHistogram calls.
+func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	var t int64
+	t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
+	if value.IsStaleNaN(f.currentFH.Sum) {
+		f.setLastFH(f.currentFH)
+		return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
+	}
+
+	if fh == nil {
+		fh = &histogram.FloatHistogram{
+			CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
+			Count:            f.currentFH.Count,
+			Sum:              f.currentFH.Sum,
+		}
+		f.setLastFH(f.currentFH)
+		return t, fh
+	}
+
+	fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint)
+	fh.Count = f.currentFH.Count
+	fh.Sum = f.currentFH.Sum
+	f.setLastFH(f.currentFH)
+	return t, fh
+}
+
+func (f *histogramStatsIterator) setLastH(h *histogram.Histogram) {
+	if f.lastH == nil {
+		f.lastH = h.Copy()
+	} else {
+		h.CopyTo(f.lastH)
+	}
+}
+
+func (f *histogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) {
+	if f.lastFH == nil {
+		f.lastFH = fh.Copy()
+	} else {
+		fh.CopyTo(f.lastFH)
+	}
+}
+
+func (f *histogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint {
+	if hint != histogram.UnknownCounterReset {
+		return hint
+	}
+	if f.lastFH == nil {
+		return histogram.NotCounterReset
+	}
+
+	if f.currentFH.DetectReset(f.lastFH) {
+		return histogram.CounterReset
+	}
+	return histogram.NotCounterReset
+}
+
+func (f *histogramStatsIterator) getResetHint(h *histogram.Histogram) histogram.CounterResetHint {
+	if h.CounterResetHint != histogram.UnknownCounterReset {
+		return h.CounterResetHint
+	}
+	if f.lastH == nil {
+		return histogram.NotCounterReset
+	}
+
+	fh, prevFH := h.ToFloat(nil), f.lastH.ToFloat(nil)
+	if fh.DetectReset(prevFH) {
+		return histogram.CounterReset
+	}
+	return histogram.NotCounterReset
+}
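For orientation, this is roughly how a caller consumes the iterator above when only the per-sample count and sum are needed (for example for histogram_count or histogram_sum): a hedged sketch, assuming `it` is a chunkenc.Iterator obtained from a storage.Series; the printHistogramStats helper is illustrative and not part of this change.

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// printHistogramStats drains a series iterator through
// NewHistogramStatsIterator, reading only the cheap count/sum view of each
// native histogram sample; bucket data is never copied into the result.
func printHistogramStats(it chunkenc.Iterator) error {
	statsIt := promql.NewHistogramStatsIterator(it)
	var h *histogram.Histogram
	for vt := statsIt.Next(); vt != chunkenc.ValNone; vt = statsIt.Next() {
		if vt != chunkenc.ValHistogram {
			continue // Float and float-histogram samples elided in this sketch.
		}
		var t int64
		// Passing the previously returned object back in lets the iterator
		// reuse it instead of allocating, per the AtHistogram contract above.
		t, h = statsIt.AtHistogram(h)
		fmt.Printf("t=%d count=%d sum=%g hint=%v\n", t, h.Count, h.Sum, h.CounterResetHint)
	}
	return statsIt.Err()
}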
diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go
new file mode 100644
index 0000000000..b71a9d6029
--- /dev/null
+++ b/promql/histogram_stats_iterator_test.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+)
+
+func TestHistogramStatsDecoding(t *testing.T) {
+	histograms := []*histogram.Histogram{
+		tsdbutil.GenerateTestHistogram(0),
+		tsdbutil.GenerateTestHistogram(1),
+		tsdbutil.GenerateTestHistogram(2),
+		tsdbutil.GenerateTestHistogram(2),
+	}
+	histograms[0].CounterResetHint = histogram.NotCounterReset
+	histograms[1].CounterResetHint = histogram.UnknownCounterReset
+	histograms[2].CounterResetHint = histogram.CounterReset
+	histograms[3].CounterResetHint = histogram.UnknownCounterReset
+
+	expectedHints := []histogram.CounterResetHint{
+		histogram.NotCounterReset,
+		histogram.NotCounterReset,
+		histogram.CounterReset,
+		histogram.NotCounterReset,
+	}
+
+	t.Run("histogram_stats", func(t *testing.T) {
+		decodedStats := make([]*histogram.Histogram, 0)
+		statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
+		for statsIterator.Next() != chunkenc.ValNone {
+			_, h := statsIterator.AtHistogram(nil)
+			decodedStats = append(decodedStats, h)
+		}
+		for i := 0; i < len(histograms); i++ {
+			require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
+			require.Equal(t, histograms[i].Count, decodedStats[i].Count)
+			require.Equal(t, histograms[i].Sum, decodedStats[i].Sum)
+		}
+	})
+	t.Run("float_histogram_stats", func(t *testing.T) {
+		decodedStats := make([]*histogram.FloatHistogram, 0)
+		statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
+		for statsIterator.Next() != chunkenc.ValNone {
+			_, h := statsIterator.AtFloatHistogram(nil)
+			decodedStats = append(decodedStats, h)
+		}
+		for i := 0; i < len(histograms); i++ {
+			fh := histograms[i].ToFloat(nil)
+			require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
+			require.Equal(t, fh.Count, decodedStats[i].Count)
+			require.Equal(t, fh.Sum, decodedStats[i].Sum)
+		}
+	})
+}
+
+type histogramSeries struct {
+	histograms []*histogram.Histogram
+}
+
+func newHistogramSeries(histograms []*histogram.Histogram) *histogramSeries {
+	return &histogramSeries{
+		histograms: histograms,
+	}
+}
+
+func (m histogramSeries) Labels() labels.Labels { return labels.EmptyLabels() }
+
+func (m histogramSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator {
+	return &histogramIterator{
+		i:          -1,
+		histograms: m.histograms,
+	}
+}
+
+type histogramIterator struct {
+	i          int
+	histograms []*histogram.Histogram
+}
+
+func (h *histogramIterator) Next() chunkenc.ValueType {
+	h.i++
+	if h.i < len(h.histograms) {
+		return chunkenc.ValHistogram
+	}
+	return chunkenc.ValNone
+}
+
+func (h *histogramIterator) Seek(t int64) chunkenc.ValueType { panic("not implemented") }
+
+func (h *histogramIterator) At() (int64, float64) { panic("not implemented") }
+
+func (h *histogramIterator) AtHistogram(_ *histogram.Histogram) (int64, *histogram.Histogram) {
+	return 0, h.histograms[h.i]
+}
+
+func (h *histogramIterator) AtFloatHistogram(_ *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+	return 0, h.histograms[h.i].ToFloat(nil)
+}
+
+func (h *histogramIterator) AtT() int64 { return 0 }
+
+func (h *histogramIterator) Err() error { return nil }
diff --git a/promql/parser/ast.go b/promql/parser/ast.go
index 379352599d..830e8a2c5e 100644
--- a/promql/parser/ast.go
+++ b/promql/parser/ast.go
@@ -198,10 +198,11 @@ type VectorSelector struct {
 	// Offset is the offset used during the query execution
 	// which is calculated using the original offset, at modifier time,
 	// eval time, and subquery offsets in the AST tree.
-	Offset        time.Duration
-	Timestamp     *int64
-	StartOrEnd    ItemType // Set when @ is used with start() or end()
-	LabelMatchers []*labels.Matcher
+	Offset               time.Duration
+	Timestamp            *int64
+	SkipHistogramBuckets bool     // Set when decoding native histogram buckets is not needed for query evaluation.
+	StartOrEnd           ItemType // Set when @ is used with start() or end()
+	LabelMatchers        []*labels.Matcher
 
 	// The unexpanded seriesSet populated at query preparation time.
 	UnexpandedSeriesSet storage.SeriesSet
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index 841bd31c19..d84acc37c5 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -84,6 +84,7 @@ BUCKETS_DESC
 NEGATIVE_BUCKETS_DESC
 ZERO_BUCKET_DESC
 ZERO_BUCKET_WIDTH_DESC
+CUSTOM_VALUES_DESC
 %token histogramDescEnd
 
 // Operators.
@@ -125,6 +126,8 @@ STDDEV
 STDVAR
 SUM
 TOPK
+LIMITK
+LIMIT_RATIO
 %token aggregatorsEnd
 
 // Keywords.
@@ -608,7 +611,7 @@ metric : metric_identifier label_set
 			;
 
-metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END;
+metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO;
 
 label_set : LEFT_BRACE label_set_list RIGHT_BRACE
 			{ $$ = labels.New($2...) }
@@ -797,6 +800,11 @@ histogram_desc_item
 			$$ = yylex.(*parser).newMap()
 			$$["z_bucket_w"] = $3
 			}
+			| CUSTOM_VALUES_DESC COLON bucket_set
+			{
+			$$ = yylex.(*parser).newMap()
+			$$["custom_values"] = $3
+			}
 			| BUCKETS_DESC COLON bucket_set
 			{
 			$$ = yylex.(*parser).newMap()
@@ -845,10 +853,10 @@ bucket_set_list : bucket_set_list SPACE number
 * Keyword lists.
 */
 
-aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;
+aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;
 
 // Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
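For context on the two aggregators wired into the grammar above: once the full change set is in place (lexer keywords plus these rules), limitk and limit_ratio parse like topk/bottomk, i.e. as parameterized aggregations. A sketch of what a caller sees through the public parser API; it assumes a build containing this change and, since both aggregators are experimental, that the parser.EnableExperimentalFunctions gate applies to them as it does to other experimental PromQL features:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Assumption: the new aggregators are gated behind the experimental
	// functions switch, like other experimental PromQL additions.
	parser.EnableExperimentalFunctions = true

	for _, q := range []string{
		`limitk(2, http_requests_total)`,
		`limit_ratio(0.5, http_requests_total) by (job)`,
	} {
		expr, err := parser.ParseExpr(q)
		if err != nil {
			fmt.Println(q, "->", err)
			continue
		}
		// Both expressions parse into an AggregateExpr carrying a parameter,
		// exactly like topk/bottomk/quantile.
		agg := expr.(*parser.AggregateExpr)
		fmt.Printf("%s -> op=%v param=%s grouping=%v\n", q, agg.Op, agg.Param, agg.Grouping)
	}
}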
-maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2; +maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO; unary_op : ADD | SUB; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 3075b9b1b1..07899c0a00 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -67,62 +67,65 @@ const BUCKETS_DESC = 57375 const NEGATIVE_BUCKETS_DESC = 57376 const ZERO_BUCKET_DESC = 57377 const ZERO_BUCKET_WIDTH_DESC = 57378 -const histogramDescEnd = 57379 -const operatorsStart = 57380 -const ADD = 57381 -const DIV = 57382 -const EQLC = 57383 -const EQL_REGEX = 57384 -const GTE = 57385 -const GTR = 57386 -const LAND = 57387 -const LOR = 57388 -const LSS = 57389 -const LTE = 57390 -const LUNLESS = 57391 -const MOD = 57392 -const MUL = 57393 -const NEQ = 57394 -const NEQ_REGEX = 57395 -const POW = 57396 -const SUB = 57397 -const AT = 57398 -const ATAN2 = 57399 -const operatorsEnd = 57400 -const aggregatorsStart = 57401 -const AVG = 57402 -const BOTTOMK = 57403 -const COUNT = 57404 -const COUNT_VALUES = 57405 -const GROUP = 57406 -const MAX = 57407 -const MIN = 57408 -const QUANTILE = 57409 -const STDDEV = 57410 -const STDVAR = 57411 -const SUM = 57412 -const TOPK = 57413 -const aggregatorsEnd = 57414 -const keywordsStart = 57415 -const BOOL = 57416 -const BY = 57417 -const GROUP_LEFT = 57418 -const GROUP_RIGHT = 57419 -const IGNORING = 57420 -const OFFSET = 57421 -const ON = 57422 -const WITHOUT = 57423 -const keywordsEnd = 57424 -const preprocessorStart = 57425 -const START = 57426 -const END = 57427 -const preprocessorEnd = 57428 -const startSymbolsStart = 57429 -const START_METRIC = 57430 -const START_SERIES_DESCRIPTION = 57431 -const START_EXPRESSION = 57432 -const START_METRIC_SELECTOR = 57433 -const startSymbolsEnd = 57434 +const CUSTOM_VALUES_DESC = 57379 +const histogramDescEnd = 57380 +const operatorsStart = 57381 +const ADD = 57382 +const DIV = 57383 +const EQLC = 57384 +const EQL_REGEX = 57385 +const GTE = 57386 +const GTR = 57387 +const LAND = 57388 +const LOR = 57389 +const LSS = 57390 +const LTE = 57391 +const LUNLESS = 57392 +const MOD = 57393 +const MUL = 57394 +const NEQ = 57395 +const NEQ_REGEX = 57396 +const POW = 57397 +const SUB = 57398 +const AT = 57399 +const ATAN2 = 57400 +const operatorsEnd = 57401 +const aggregatorsStart = 57402 +const AVG = 57403 +const BOTTOMK = 57404 +const COUNT = 57405 +const COUNT_VALUES = 57406 +const GROUP = 57407 +const MAX = 57408 +const MIN = 57409 +const QUANTILE = 57410 +const STDDEV = 57411 +const STDVAR = 57412 +const SUM = 57413 +const TOPK = 57414 +const LIMITK = 57415 +const LIMIT_RATIO = 57416 +const aggregatorsEnd = 57417 +const keywordsStart = 57418 +const BOOL = 57419 +const BY = 57420 +const GROUP_LEFT = 57421 +const GROUP_RIGHT = 57422 +const IGNORING = 57423 +const OFFSET = 57424 +const ON = 57425 +const WITHOUT = 57426 +const keywordsEnd = 57427 +const preprocessorStart = 57428 +const START = 57429 +const END = 57430 +const preprocessorEnd = 57431 +const startSymbolsStart = 57432 +const START_METRIC = 57433 +const START_SERIES_DESCRIPTION = 
57434 +const START_EXPRESSION = 57435 +const START_METRIC_SELECTOR = 57436 +const startSymbolsEnd = 57437 var yyToknames = [...]string{ "$end", @@ -161,6 +164,7 @@ var yyToknames = [...]string{ "NEGATIVE_BUCKETS_DESC", "ZERO_BUCKET_DESC", "ZERO_BUCKET_WIDTH_DESC", + "CUSTOM_VALUES_DESC", "histogramDescEnd", "operatorsStart", "ADD", @@ -196,6 +200,8 @@ var yyToknames = [...]string{ "STDVAR", "SUM", "TOPK", + "LIMITK", + "LIMIT_RATIO", "aggregatorsEnd", "keywordsStart", "BOOL", @@ -229,276 +235,298 @@ var yyExca = [...]int16{ -1, 1, 1, -1, -2, 0, - -1, 35, - 1, 134, - 10, 134, - 24, 134, + -1, 37, + 1, 136, + 10, 136, + 24, 136, -2, 0, - -1, 58, - 2, 171, - 15, 171, - 75, 171, - 81, 171, - -2, 100, - -1, 59, - 2, 172, - 15, 172, - 75, 172, - 81, 172, - -2, 101, -1, 60, - 2, 173, - 15, 173, - 75, 173, - 81, 173, - -2, 103, - -1, 61, 2, 174, 15, 174, - 75, 174, - 81, 174, - -2, 104, - -1, 62, + 78, 174, + 84, 174, + -2, 100, + -1, 61, 2, 175, 15, 175, - 75, 175, - 81, 175, - -2, 105, - -1, 63, + 78, 175, + 84, 175, + -2, 101, + -1, 62, 2, 176, 15, 176, - 75, 176, - 81, 176, - -2, 110, - -1, 64, + 78, 176, + 84, 176, + -2, 103, + -1, 63, 2, 177, 15, 177, - 75, 177, - 81, 177, - -2, 112, - -1, 65, + 78, 177, + 84, 177, + -2, 104, + -1, 64, 2, 178, 15, 178, - 75, 178, - 81, 178, - -2, 114, - -1, 66, + 78, 178, + 84, 178, + -2, 105, + -1, 65, 2, 179, 15, 179, - 75, 179, - 81, 179, - -2, 115, - -1, 67, + 78, 179, + 84, 179, + -2, 110, + -1, 66, 2, 180, 15, 180, - 75, 180, - 81, 180, - -2, 116, - -1, 68, + 78, 180, + 84, 180, + -2, 112, + -1, 67, 2, 181, 15, 181, - 75, 181, - 81, 181, - -2, 117, - -1, 69, + 78, 181, + 84, 181, + -2, 114, + -1, 68, 2, 182, 15, 182, - 75, 182, - 81, 182, + 78, 182, + 84, 182, + -2, 115, + -1, 69, + 2, 183, + 15, 183, + 78, 183, + 84, 183, + -2, 116, + -1, 70, + 2, 184, + 15, 184, + 78, 184, + 84, 184, + -2, 117, + -1, 71, + 2, 185, + 15, 185, + 78, 185, + 84, 185, -2, 118, - -1, 195, - 12, 230, - 13, 230, - 18, 230, - 19, 230, - 25, 230, - 39, 230, - 45, 230, - 46, 230, - 49, 230, - 55, 230, - 60, 230, - 61, 230, - 62, 230, - 63, 230, - 64, 230, - 65, 230, - 66, 230, - 67, 230, - 68, 230, - 69, 230, - 70, 230, - 71, 230, - 75, 230, - 79, 230, - 81, 230, - 84, 230, - 85, 230, + -1, 72, + 2, 186, + 15, 186, + 78, 186, + 84, 186, + -2, 122, + -1, 73, + 2, 187, + 15, 187, + 78, 187, + 84, 187, + -2, 123, + -1, 199, + 12, 237, + 13, 237, + 18, 237, + 19, 237, + 25, 237, + 40, 237, + 46, 237, + 47, 237, + 50, 237, + 56, 237, + 61, 237, + 62, 237, + 63, 237, + 64, 237, + 65, 237, + 66, 237, + 67, 237, + 68, 237, + 69, 237, + 70, 237, + 71, 237, + 72, 237, + 73, 237, + 74, 237, + 78, 237, + 82, 237, + 84, 237, + 87, 237, + 88, 237, -2, 0, - -1, 196, - 12, 230, - 13, 230, - 18, 230, - 19, 230, - 25, 230, - 39, 230, - 45, 230, - 46, 230, - 49, 230, - 55, 230, - 60, 230, - 61, 230, - 62, 230, - 63, 230, - 64, 230, - 65, 230, - 66, 230, - 67, 230, - 68, 230, - 69, 230, - 70, 230, - 71, 230, - 75, 230, - 79, 230, - 81, 230, - 84, 230, - 85, 230, + -1, 200, + 12, 237, + 13, 237, + 18, 237, + 19, 237, + 25, 237, + 40, 237, + 46, 237, + 47, 237, + 50, 237, + 56, 237, + 61, 237, + 62, 237, + 63, 237, + 64, 237, + 65, 237, + 66, 237, + 67, 237, + 68, 237, + 69, 237, + 70, 237, + 71, 237, + 72, 237, + 73, 237, + 74, 237, + 78, 237, + 82, 237, + 84, 237, + 87, 237, + 88, 237, -2, 0, - -1, 217, - 21, 228, + -1, 221, + 21, 235, -2, 0, - -1, 285, - 21, 229, + -1, 292, + 21, 236, -2, 0, } const yyPrivate = 57344 -const yyLast = 742 +const yyLast = 793 var yyAct = [...]int16{ - 151, 322, 320, 
268, 327, 148, 221, 37, 187, 144, - 281, 280, 152, 113, 77, 173, 104, 102, 101, 6, - 128, 223, 105, 193, 155, 194, 195, 196, 339, 262, - 260, 233, 317, 316, 57, 100, 294, 239, 103, 146, - 300, 313, 263, 156, 156, 283, 147, 338, 259, 123, - 337, 106, 252, 311, 155, 299, 340, 301, 264, 157, - 157, 108, 298, 109, 235, 236, 292, 251, 237, 107, - 155, 292, 174, 191, 175, 96, 250, 99, 258, 224, - 226, 228, 229, 230, 238, 240, 243, 244, 245, 246, - 247, 110, 145, 225, 227, 231, 232, 234, 241, 242, - 98, 257, 321, 248, 249, 2, 3, 4, 5, 218, - 158, 104, 177, 217, 168, 162, 165, 105, 175, 160, - 164, 161, 176, 178, 189, 213, 106, 328, 216, 256, - 183, 179, 192, 163, 181, 100, 190, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 255, 182, 72, 212, 177, 214, 215, 33, - 82, 84, 85, 7, 86, 87, 176, 178, 90, 91, - 223, 93, 94, 95, 116, 96, 97, 99, 83, 147, - 233, 286, 289, 116, 114, 254, 239, 288, 147, 172, - 220, 124, 253, 114, 171, 310, 309, 117, 120, 261, - 98, 112, 287, 119, 278, 279, 117, 170, 282, 10, - 308, 159, 307, 235, 236, 312, 118, 237, 147, 74, - 306, 305, 304, 303, 302, 250, 81, 285, 224, 226, - 228, 229, 230, 238, 240, 243, 244, 245, 246, 247, - 79, 79, 225, 227, 231, 232, 234, 241, 242, 48, - 78, 78, 248, 249, 122, 73, 121, 150, 180, 76, - 290, 291, 293, 56, 295, 8, 9, 9, 34, 35, - 1, 284, 296, 297, 155, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 47, 46, 45, 44, 156, 314, 315, 127, 43, 42, - 41, 185, 319, 125, 166, 324, 325, 326, 188, 323, - 157, 329, 191, 331, 330, 155, 40, 126, 332, 333, - 100, 51, 72, 334, 53, 39, 38, 22, 52, 336, - 49, 167, 186, 335, 54, 156, 265, 80, 341, 153, - 154, 184, 219, 75, 115, 82, 84, 149, 70, 55, - 222, 157, 50, 111, 18, 19, 93, 94, 20, 0, - 96, 97, 99, 83, 71, 0, 0, 0, 0, 58, - 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, - 69, 0, 0, 0, 13, 98, 0, 0, 24, 0, - 30, 0, 0, 31, 32, 36, 100, 51, 72, 0, - 53, 267, 0, 22, 52, 0, 0, 0, 266, 0, - 54, 0, 270, 271, 269, 275, 277, 274, 276, 272, - 273, 0, 84, 0, 70, 0, 0, 0, 0, 0, - 18, 19, 93, 94, 20, 0, 96, 0, 99, 83, - 71, 0, 0, 0, 0, 58, 59, 60, 61, 62, - 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, - 13, 98, 0, 0, 24, 0, 30, 0, 0, 31, - 32, 51, 72, 0, 53, 318, 0, 22, 52, 0, - 0, 0, 0, 0, 54, 0, 270, 271, 269, 275, - 277, 274, 276, 272, 273, 0, 0, 0, 70, 0, - 0, 0, 0, 0, 18, 19, 0, 0, 20, 0, - 0, 0, 17, 72, 71, 0, 0, 0, 22, 58, - 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, - 69, 0, 0, 0, 13, 0, 0, 0, 24, 0, - 30, 0, 0, 31, 32, 18, 19, 0, 0, 20, - 0, 0, 0, 17, 33, 0, 0, 0, 0, 22, - 11, 12, 14, 15, 16, 21, 23, 25, 26, 27, - 28, 29, 0, 0, 0, 13, 0, 0, 0, 24, - 0, 30, 0, 0, 31, 32, 18, 19, 0, 0, - 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, - 27, 28, 29, 100, 0, 0, 13, 0, 0, 0, - 24, 169, 30, 0, 0, 31, 32, 0, 0, 0, - 0, 0, 100, 0, 0, 0, 0, 0, 82, 84, - 85, 0, 86, 87, 88, 89, 90, 91, 92, 93, - 94, 95, 0, 96, 97, 99, 83, 82, 84, 85, - 0, 86, 87, 88, 89, 90, 91, 92, 93, 94, - 95, 0, 96, 97, 99, 83, 100, 0, 98, 0, + 155, 330, 328, 274, 335, 152, 225, 39, 191, 148, + 288, 287, 156, 117, 81, 177, 227, 106, 105, 6, + 154, 108, 107, 197, 132, 198, 237, 109, 199, 200, + 159, 59, 243, 325, 324, 110, 321, 159, 189, 268, + 348, 301, 265, 127, 159, 192, 349, 264, 290, 195, + 176, 160, 159, 269, 308, 175, 319, 195, 160, 347, + 239, 240, 346, 112, 241, 113, 299, 161, 174, 270, + 263, 111, 254, 160, 161, 228, 230, 232, 233, 234, + 242, 244, 247, 248, 249, 250, 251, 255, 256, 161, + 114, 229, 231, 235, 236, 238, 245, 246, 108, 266, + 258, 252, 253, 329, 109, 
157, 158, 159, 2, 3, + 4, 5, 307, 160, 162, 257, 262, 299, 172, 166, + 169, 217, 104, 164, 110, 165, 150, 306, 193, 161, + 178, 104, 179, 151, 305, 183, 196, 179, 185, 261, + 194, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 128, 227, 88, 216, + 120, 218, 219, 100, 336, 103, 168, 237, 97, 98, + 118, 181, 100, 243, 103, 87, 181, 224, 259, 167, + 149, 180, 182, 121, 187, 76, 180, 182, 120, 260, + 102, 35, 124, 7, 10, 296, 151, 123, 118, 102, + 295, 239, 240, 267, 78, 241, 116, 186, 285, 286, + 122, 121, 289, 254, 318, 294, 228, 230, 232, 233, + 234, 242, 244, 247, 248, 249, 250, 251, 255, 256, + 317, 292, 229, 231, 235, 236, 238, 245, 246, 316, + 315, 314, 252, 253, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 313, + 312, 311, 310, 309, 320, 293, 297, 298, 300, 273, + 302, 222, 151, 8, 85, 221, 272, 37, 303, 304, + 276, 277, 275, 282, 284, 281, 283, 278, 279, 280, + 220, 163, 126, 50, 125, 36, 1, 291, 151, 77, + 83, 49, 322, 323, 48, 83, 47, 104, 46, 327, + 82, 131, 332, 333, 334, 82, 331, 45, 184, 338, + 337, 340, 339, 80, 44, 43, 341, 342, 129, 53, + 76, 343, 55, 86, 88, 22, 54, 345, 170, 171, + 42, 130, 56, 41, 97, 98, 40, 350, 100, 101, + 103, 87, 58, 51, 190, 9, 9, 74, 344, 271, + 84, 188, 223, 18, 19, 79, 119, 20, 153, 57, + 226, 52, 115, 75, 0, 102, 0, 0, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 0, 0, 0, 13, 0, 0, 0, 24, + 0, 30, 0, 0, 31, 32, 38, 0, 53, 76, + 0, 55, 326, 0, 22, 54, 0, 0, 0, 0, + 0, 56, 0, 276, 277, 275, 282, 284, 281, 283, + 278, 279, 280, 0, 0, 0, 74, 0, 0, 0, + 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, + 0, 0, 75, 0, 0, 0, 0, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 0, 0, 0, 13, 0, 0, 0, 24, 0, + 30, 0, 0, 31, 32, 53, 76, 0, 55, 0, + 0, 22, 54, 0, 0, 0, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 98, 0, 0, - 0, 82, 84, 85, 0, 86, 87, 88, 0, 90, - 91, 92, 93, 94, 95, 0, 96, 97, 99, 83, + 0, 0, 0, 74, 0, 17, 76, 0, 0, 18, + 19, 22, 0, 20, 0, 0, 0, 0, 0, 75, + 0, 0, 0, 0, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 0, 18, + 19, 13, 0, 20, 0, 24, 0, 30, 0, 0, + 31, 32, 0, 0, 11, 12, 14, 15, 16, 21, + 23, 25, 26, 27, 28, 29, 33, 34, 17, 35, + 0, 13, 0, 0, 22, 24, 0, 30, 0, 0, + 31, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 11, 12, 14, + 15, 16, 21, 23, 25, 26, 27, 28, 29, 33, + 34, 104, 0, 0, 13, 0, 0, 0, 24, 173, + 30, 0, 0, 31, 32, 0, 0, 0, 0, 0, + 104, 0, 0, 0, 0, 0, 0, 86, 88, 89, + 0, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 0, 100, 101, 103, 87, 86, 88, 89, 0, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 0, 100, 101, 103, 87, 104, 0, 0, 0, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 98, + 0, 0, 0, 0, 104, 0, 0, 0, 102, 0, + 0, 86, 88, 89, 0, 90, 91, 92, 0, 94, + 95, 96, 97, 98, 99, 0, 100, 101, 103, 87, + 86, 88, 89, 0, 90, 91, 0, 0, 94, 95, + 0, 97, 98, 99, 0, 100, 101, 103, 87, 0, + 0, 0, 0, 102, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 102, } var yyPact = [...]int16{ - 17, 153, 541, 541, 385, 500, -1000, -1000, -1000, 146, + 17, 183, 566, 566, 396, 503, -1000, -1000, -1000, 178, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 239, -1000, 224, -1000, 618, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 303, -1000, 272, -1000, 646, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 36, 111, -1000, 459, -1000, 459, 
141, -1000, -1000, -1000, + -1000, -1000, 20, 109, -1000, 473, -1000, 473, 172, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 181, -1000, -1000, 196, -1000, -1000, 252, -1000, - 25, -1000, -54, -54, -54, -54, -54, -54, -54, -54, - -54, -54, -54, -54, -54, -54, -54, -54, 37, 255, - 209, 111, -59, -1000, 118, 118, 309, -1000, 599, 21, - -1000, 187, -1000, -1000, 70, 114, -1000, -1000, -1000, 238, - -1000, 128, -1000, 296, 459, -1000, -55, -50, -1000, 459, - 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, - 459, 459, 459, 459, -1000, 170, -1000, -1000, -1000, 110, - -1000, -1000, -1000, -1000, -1000, -1000, 51, 51, 107, -1000, - -1000, -1000, -1000, 168, -1000, -1000, 45, -1000, 618, -1000, - -1000, 172, -1000, 127, -1000, -1000, -1000, -1000, -1000, 76, - -1000, -1000, -1000, -1000, -1000, 22, 4, 3, -1000, -1000, - -1000, 384, 382, 118, 118, 118, 118, 21, 21, 306, - 306, 306, 121, 662, 306, 306, 121, 21, 21, 306, - 21, 382, -1000, 23, -1000, -1000, -1000, 179, -1000, 180, + -1000, -1000, -1000, -1000, -1000, -1000, 186, -1000, -1000, 190, + -1000, -1000, 290, -1000, 19, -1000, -53, -53, -53, -53, + -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, + -53, -53, 124, 18, 289, 109, -57, -1000, 164, 164, + 317, -1000, 627, 108, -1000, 48, -1000, -1000, 128, 133, + -1000, -1000, -1000, 298, -1000, 182, -1000, 33, 473, -1000, + -58, -51, -1000, 473, 473, 473, 473, 473, 473, 473, + 473, 473, 473, 473, 473, 473, 473, 473, -1000, 187, + -1000, -1000, -1000, 106, -1000, -1000, -1000, -1000, -1000, -1000, + 88, 88, 269, -1000, -1000, -1000, -1000, 155, -1000, -1000, + 93, -1000, 646, -1000, -1000, 158, -1000, 114, -1000, -1000, + -1000, -1000, -1000, 45, -1000, -1000, -1000, -1000, -1000, 16, + 73, 13, -1000, -1000, -1000, 252, 117, 164, 164, 164, + 164, 108, 108, 293, 293, 293, 710, 691, 293, 293, + 710, 108, 108, 293, 108, 117, -1000, 26, -1000, -1000, + -1000, 263, -1000, 193, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 459, -1000, -1000, -1000, -1000, -1000, -1000, 52, - 52, 10, 52, 57, 57, 38, 40, -1000, -1000, 218, - 217, 216, 215, 214, 206, 204, 190, 189, -1000, -1000, - -1000, -1000, -1000, -1000, 32, 213, -1000, -1000, 19, -1000, - 618, -1000, -1000, -1000, 52, -1000, 7, 6, 458, -1000, - -1000, -1000, 47, 5, 51, 51, 51, 113, 47, 113, - 47, -1000, -1000, -1000, -1000, -1000, 52, 52, -1000, -1000, - -1000, 52, -1000, -1000, -1000, -1000, -1000, -1000, 51, -1000, - -1000, -1000, -1000, -1000, -1000, 26, -1000, 35, -1000, -1000, - -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 473, -1000, + -1000, -1000, -1000, -1000, -1000, 98, 98, 15, 98, 41, + 41, 110, 37, -1000, -1000, 257, 256, 255, 254, 253, + 235, 234, 233, 224, 208, -1000, -1000, -1000, -1000, -1000, + -1000, 35, 262, -1000, -1000, 14, -1000, 646, -1000, -1000, + -1000, 98, -1000, 8, 7, 395, -1000, -1000, -1000, 47, + 11, 88, 88, 88, 150, 150, 47, 150, 47, -1000, + -1000, -1000, -1000, -1000, 98, 98, -1000, -1000, -1000, 98, + -1000, -1000, -1000, -1000, -1000, -1000, 88, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 38, -1000, 25, -1000, -1000, -1000, + -1000, } var yyPgo = [...]int16{ - 0, 353, 13, 352, 6, 15, 350, 263, 349, 347, - 344, 209, 265, 343, 14, 342, 10, 11, 341, 337, - 8, 336, 3, 4, 333, 2, 1, 0, 332, 12, - 5, 330, 326, 18, 191, 
325, 317, 7, 316, 304, - 17, 303, 34, 300, 299, 298, 297, 293, 292, 291, - 290, 249, 9, 271, 270, 268, + 0, 372, 13, 371, 6, 15, 370, 352, 369, 368, + 366, 194, 273, 365, 14, 362, 10, 11, 361, 360, + 8, 359, 3, 4, 358, 2, 1, 0, 354, 12, + 5, 353, 346, 18, 156, 343, 341, 7, 340, 338, + 17, 328, 31, 325, 324, 317, 311, 308, 306, 304, + 301, 293, 9, 297, 296, 295, } var yyR1 = [...]int8{ @@ -514,18 +542,18 @@ var yyR1 = [...]int8{ 1, 2, 2, 2, 2, 2, 2, 2, 12, 12, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 11, 11, 11, 11, 13, 13, 13, 14, - 14, 14, 14, 55, 19, 19, 19, 19, 18, 18, - 18, 18, 18, 18, 18, 18, 18, 28, 28, 28, - 20, 20, 20, 20, 21, 21, 21, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 23, 23, 24, 24, - 24, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 11, 11, 11, 11, 13, 13, + 13, 14, 14, 14, 14, 55, 19, 19, 19, 19, + 18, 18, 18, 18, 18, 18, 18, 18, 18, 28, + 28, 28, 20, 20, 20, 20, 21, 21, 21, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, + 23, 24, 24, 24, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 8, 8, 5, 5, 5, 5, 44, 27, 29, 29, - 30, 30, 26, 25, 25, 52, 48, 10, 53, 53, - 17, 17, + 6, 6, 6, 6, 6, 6, 6, 8, 8, 5, + 5, 5, 5, 44, 27, 29, 29, 30, 30, 26, + 25, 25, 52, 48, 10, 53, 53, 17, 17, } var yyR2 = [...]int8{ @@ -541,94 +569,96 @@ var yyR2 = [...]int8{ 2, 3, 3, 1, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 3, 4, 2, 0, 3, 1, 2, 3, - 3, 2, 1, 2, 0, 3, 2, 1, 1, 3, - 1, 3, 4, 1, 3, 5, 5, 1, 1, 1, - 4, 3, 3, 2, 3, 1, 2, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 4, 3, 3, 1, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 3, 4, 2, 0, 3, 1, + 2, 3, 3, 2, 1, 2, 0, 3, 2, 1, + 1, 3, 1, 3, 4, 1, 3, 5, 5, 1, + 1, 1, 4, 3, 3, 2, 3, 1, 2, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, + 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 1, 1, 1, 1, 0, 1, - 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, + 2, 1, 1, 1, 1, 0, 1, 0, 1, } var yyChk = [...]int16{ - -1000, -54, 88, 89, 90, 91, 2, 10, -12, -7, - -11, 60, 61, 75, 62, 63, 64, 12, 45, 46, - 49, 65, 18, 66, 79, 67, 68, 69, 70, 71, - 81, 84, 85, 13, -55, -12, 10, -37, -32, -35, - -38, -43, -44, -45, -47, -48, -49, -50, -51, -31, - -3, 12, 19, 15, 25, -8, -7, -42, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 39, 55, 13, -51, -11, -13, 20, -14, 12, 2, - -19, 2, 39, 57, 40, 41, 43, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 54, 55, 79, 56, - 14, -33, -40, 2, 75, 81, 15, -40, -37, -37, - -42, -1, 20, -2, 12, -10, 2, 25, 20, 7, - 2, 4, 2, 24, -34, -41, -36, -46, 74, -34, - -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, - -34, -34, -34, -34, -52, 55, 2, 9, -30, -9, - 2, -27, -29, 84, 85, 19, 39, 55, -52, 2, - -40, -33, -16, 15, 2, -16, -39, 22, -37, 22, - 20, 7, 2, -5, 2, 4, 52, 42, 53, -5, - 20, -14, 25, 2, -18, 5, -28, -20, 12, -27, - -29, 16, -37, 78, 80, 76, 77, -37, -37, -37, - -37, -37, -37, -37, -37, -37, -37, -37, -37, -37, - -37, -37, -52, 15, -27, -27, 21, 6, 2, -15, - 22, -4, -6, 2, 60, 74, 61, 75, 62, 63, - 64, 76, 77, 12, 78, 45, 46, 49, 65, 18, - 66, 79, 80, 67, 68, 69, 70, 71, 84, 85, - 57, 22, 7, 20, -2, 25, 2, 25, 2, 26, - 26, -29, 26, 39, 55, -21, 24, 17, -22, 30, - 28, 29, 35, 36, 33, 31, 34, 32, -16, -16, - -17, -16, -17, 22, -53, -52, 2, 22, 7, 2, - -37, -26, 19, -26, 26, -26, -20, -20, 24, 17, - 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 21, 2, 22, -4, -26, 26, 26, 
17, -22, - -25, 55, -26, -30, -27, -27, -27, -23, 14, -25, - -23, -25, -26, -26, -26, -24, -27, 24, 21, 2, - 21, -27, + -1000, -54, 91, 92, 93, 94, 2, 10, -12, -7, + -11, 61, 62, 78, 63, 64, 65, 12, 46, 47, + 50, 66, 18, 67, 82, 68, 69, 70, 71, 72, + 84, 87, 88, 73, 74, 13, -55, -12, 10, -37, + -32, -35, -38, -43, -44, -45, -47, -48, -49, -50, + -51, -31, -3, 12, 19, 15, 25, -8, -7, -42, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 40, 56, 13, -51, -11, -13, + 20, -14, 12, 2, -19, 2, 40, 58, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 55, 56, 82, 57, 14, -33, -40, 2, 78, 84, + 15, -40, -37, -37, -42, -1, 20, -2, 12, -10, + 2, 25, 20, 7, 2, 4, 2, 24, -34, -41, + -36, -46, 77, -34, -34, -34, -34, -34, -34, -34, + -34, -34, -34, -34, -34, -34, -34, -34, -52, 56, + 2, 9, -30, -9, 2, -27, -29, 87, 88, 19, + 40, 56, -52, 2, -40, -33, -16, 15, 2, -16, + -39, 22, -37, 22, 20, 7, 2, -5, 2, 4, + 53, 43, 54, -5, 20, -14, 25, 2, -18, 5, + -28, -20, 12, -27, -29, 16, -37, 81, 83, 79, + 80, -37, -37, -37, -37, -37, -37, -37, -37, -37, + -37, -37, -37, -37, -37, -37, -52, 15, -27, -27, + 21, 6, 2, -15, 22, -4, -6, 2, 61, 77, + 62, 78, 63, 64, 65, 79, 80, 12, 81, 46, + 47, 50, 66, 18, 67, 82, 83, 68, 69, 70, + 71, 72, 87, 88, 58, 73, 74, 22, 7, 20, + -2, 25, 2, 25, 2, 26, 26, -29, 26, 40, + 56, -21, 24, 17, -22, 30, 28, 29, 35, 36, + 37, 33, 31, 34, 32, -16, -16, -17, -16, -17, + 22, -53, -52, 2, 22, 7, 2, -37, -26, 19, + -26, 26, -26, -20, -20, 24, 17, 2, 17, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 21, + 2, 22, -4, -26, 26, 26, 17, -22, -25, 56, + -26, -30, -27, -27, -27, -23, 14, -23, -25, -23, + -25, -26, -26, -26, -24, -27, 24, 21, 2, 21, + -27, } var yyDef = [...]int16{ - 0, -2, 125, 125, 0, 0, 7, 6, 1, 125, + 0, -2, 127, 127, 0, 0, 7, 6, 1, 127, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, - 119, 120, 121, 0, 2, -2, 3, 4, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 0, 106, 216, 0, 226, 0, 83, 84, -2, -2, + 119, 120, 121, 122, 123, 0, 2, -2, 3, 4, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 0, 106, 223, 0, 233, 0, 83, 84, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - 210, 211, 0, 5, 98, 0, 124, 127, 0, 132, - 133, 137, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 43, 43, 43, 43, 43, 43, 43, 0, 0, - 0, 0, 22, 23, 0, 0, 0, 60, 0, 81, - 82, 0, 87, 89, 0, 93, 97, 227, 122, 0, - 128, 0, 131, 136, 0, 42, 47, 48, 44, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 67, 0, 69, 225, 70, 0, - 72, 220, 221, 73, 74, 217, 0, 0, 0, 80, - 20, 21, 24, 0, 54, 25, 0, 62, 64, 66, - 85, 0, 90, 0, 96, 212, 213, 214, 215, 0, - 123, 126, 129, 130, 135, 138, 140, 143, 147, 148, - 149, 0, 26, 0, 0, -2, -2, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 68, 0, 218, 219, 75, -2, 79, 0, - 53, 56, 58, 59, 183, 184, 185, 186, 187, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, - 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, - 209, 61, 65, 86, 88, 91, 95, 92, 94, 0, - 0, 0, 0, 0, 0, 0, 0, 153, 155, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 45, 46, - 49, 231, 50, 71, 0, -2, 78, 51, 0, 57, - 63, 139, 222, 141, 0, 144, 0, 0, 0, 151, - 156, 152, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 76, 77, 52, 55, 142, 0, 0, 150, 154, - 157, 0, 224, 158, 159, 160, 161, 162, 0, 163, - 164, 165, 145, 146, 223, 0, 169, 0, 167, 170, - 166, 168, + -2, -2, -2, -2, 217, 218, 0, 5, 98, 0, + 126, 129, 0, 134, 135, 139, 43, 43, 43, 43, + 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, + 43, 43, 0, 0, 0, 0, 22, 23, 0, 0, + 0, 60, 0, 81, 82, 0, 87, 89, 0, 93, + 97, 234, 124, 0, 130, 0, 133, 
138, 0, 42, + 47, 48, 44, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 67, 0, + 69, 232, 70, 0, 72, 227, 228, 73, 74, 224, + 0, 0, 0, 80, 20, 21, 24, 0, 54, 25, + 0, 62, 64, 66, 85, 0, 90, 0, 96, 219, + 220, 221, 222, 0, 125, 128, 131, 132, 137, 140, + 142, 145, 149, 150, 151, 0, 26, 0, 0, -2, + -2, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 68, 0, 225, 226, + 75, -2, 79, 0, 53, 56, 58, 59, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 61, 65, 86, + 88, 91, 95, 92, 94, 0, 0, 0, 0, 0, + 0, 0, 0, 155, 157, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 45, 46, 49, 238, 50, + 71, 0, -2, 78, 51, 0, 57, 63, 141, 229, + 143, 0, 146, 0, 0, 0, 153, 158, 154, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 76, + 77, 52, 55, 144, 0, 0, 152, 156, 159, 0, + 231, 160, 161, 162, 163, 164, 0, 165, 166, 167, + 168, 147, 148, 230, 0, 172, 0, 170, 173, 169, + 171, } var yyTok1 = [...]int8{ @@ -645,7 +675,7 @@ var yyTok2 = [...]int8{ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, + 92, 93, 94, 95, } var yyTok3 = [...]int8{ @@ -1501,66 +1531,66 @@ yydefault: { yyVAL.labels = yyDollar[1].labels } - case 122: + case 124: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 123: + case 125: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 124: + case 126: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 125: + case 127: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 126: + case 128: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 127: + case 129: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 128: + case 130: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 129: + case 131: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 130: + case 132: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 131: + case 133: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 132: + case 134: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 133: + case 135: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1568,33 +1598,33 @@ yydefault: values: yyDollar[2].series, } } - case 134: + case 136: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 135: + case 137: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 136: + case 138: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 137: + case 139: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 138: + case 140: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 139: + case 141: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1602,12 +1632,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 140: + case 142: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 141: + case 143: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1616,7 +1646,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 142: + case 144: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1626,12 +1656,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 143: + case 145: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 144: + case 146: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1641,7 +1671,7 @@ yydefault: //$1 += $2 } } - case 145: + case 147: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1650,7 +1680,7 @@ yydefault: } yyVAL.series = val } - case 146: + case 148: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1659,7 +1689,7 @@ yydefault: } yyVAL.series = val } - case 147: + case 149: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1667,118 +1697,124 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 150: + case 152: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 151: + case 153: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 152: + case 154: yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 153: + case 155: yyDollar = yyS[yypt-2 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 154: + case 156: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 155: + case 157: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 156: + case 158: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") } - case 157: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["schema"] = yyDollar[3].int - } - case 158: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["sum"] = yyDollar[3].float - } case 159: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["count"] = yyDollar[3].float + yyVAL.descriptors["schema"] = yyDollar[3].int } case 160: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket"] = yyDollar[3].float + yyVAL.descriptors["sum"] = yyDollar[3].float } case 161: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float + yyVAL.descriptors["count"] = yyDollar[3].float } case 162: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 163: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["offset"] = yyDollar[3].int + yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_offset"] = yyDollar[3].int + yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 166: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["offset"] = yyDollar[3].int } case 167: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 168: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 169: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.bucket_set = yyDollar[2].bucket_set + } + case 170: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = yyDollar[2].bucket_set + } + case 171: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + } + case 172: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 216: + case 223: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1786,22 +1822,22 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 217: + case 224: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 218: + case 225: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 219: + case 226: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 222: + case 229: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1810,17 +1846,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 223: + case 230: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 224: + case 231: 
yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 225: + case 232: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1829,7 +1865,7 @@ yydefault: yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) } } - case 226: + case 233: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1837,7 +1873,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 227: + case 234: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1846,12 +1882,12 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 228: + case 235: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.duration = 0 } - case 230: + case 237: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 4e3de2a668..8c7fbb89b9 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -65,7 +65,7 @@ func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggre // IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter. // Returns false otherwise. func (i ItemType) IsAggregatorWithParam() bool { - return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE + return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE || i == LIMITK || i == LIMIT_RATIO } // IsKeyword returns true if the Item corresponds to a keyword. @@ -118,6 +118,8 @@ var key = map[string]ItemType{ "bottomk": BOTTOMK, "count_values": COUNT_VALUES, "quantile": QUANTILE, + "limitk": LIMITK, + "limit_ratio": LIMIT_RATIO, // Keywords. "offset": OFFSET, @@ -135,15 +137,16 @@ var key = map[string]ItemType{ } var histogramDesc = map[string]ItemType{ - "sum": SUM_DESC, - "count": COUNT_DESC, - "schema": SCHEMA_DESC, - "offset": OFFSET_DESC, - "n_offset": NEGATIVE_OFFSET_DESC, - "buckets": BUCKETS_DESC, - "n_buckets": NEGATIVE_BUCKETS_DESC, - "z_bucket": ZERO_BUCKET_DESC, - "z_bucket_w": ZERO_BUCKET_WIDTH_DESC, + "sum": SUM_DESC, + "count": COUNT_DESC, + "schema": SCHEMA_DESC, + "offset": OFFSET_DESC, + "n_offset": NEGATIVE_OFFSET_DESC, + "buckets": BUCKETS_DESC, + "n_buckets": NEGATIVE_BUCKETS_DESC, + "z_bucket": ZERO_BUCKET_DESC, + "z_bucket_w": ZERO_BUCKET_WIDTH_DESC, + "custom_values": CUSTOM_VALUES_DESC, } // ItemTypeStr is the default string representations for common Items. It does not @@ -313,6 +316,11 @@ func (l *Lexer) accept(valid string) bool { return false } +// is peeks and returns true if the next rune is contained in the provided string. +func (l *Lexer) is(valid string) bool { + return strings.ContainsRune(valid, l.peek()) +} + // acceptRun consumes a run of runes from the valid set. func (l *Lexer) acceptRun(valid string) { for strings.ContainsRune(valid, l.next()) { @@ -901,19 +909,78 @@ func acceptRemainingDuration(l *Lexer) bool { // scanNumber scans numbers of different formats. The scanned Item is // not necessarily a valid number. This case is caught by the parser. func (l *Lexer) scanNumber() bool { - digits := "0123456789" + // Modify the digit pattern if the number is hexadecimal. + digitPattern := "0123456789" // Disallow hexadecimal in series descriptions as the syntax is ambiguous. 
-	if !l.seriesDesc && l.accept("0") && l.accept("xX") {
-		digits = "0123456789abcdefABCDEF"
+	if !l.seriesDesc &&
+		l.accept("0") && l.accept("xX") {
+		l.accept("_") // e.g., 0X_1FFFP-16 == 0.1249847412109375
+		digitPattern = "0123456789abcdefABCDEF"
 	}
-	l.acceptRun(digits)
-	if l.accept(".") {
-		l.acceptRun(digits)
-	}
-	if l.accept("eE") {
-		l.accept("+-")
-		l.acceptRun("0123456789")
+	const (
+		// Define dot, exponent, and underscore patterns.
+		dotPattern        = "."
+		exponentPattern   = "eE"
+		underscorePattern = "_"
+		// Anti-patterns are rune sets that cannot follow their respective rune.
+		dotAntiPattern        = "_."
+		exponentAntiPattern   = "._eE" // and EOL.
+		underscoreAntiPattern = "._eE" // and EOL.
+	)
+	// All numbers follow the prefix: [.][d][d._eE]*
+	l.accept(dotPattern)
+	l.accept(digitPattern)
+	// [d._eE]* hereon.
+	dotConsumed := false
+	exponentConsumed := false
+	for l.is(digitPattern + dotPattern + underscorePattern + exponentPattern) {
+		// "." cannot repeat.
+		if l.is(dotPattern) {
+			if dotConsumed {
+				l.accept(dotPattern)
+				return false
+			}
+		}
+		// "eE" cannot repeat.
+		if l.is(exponentPattern) {
+			if exponentConsumed {
+				l.accept(exponentPattern)
+				return false
+			}
+		}
+		// Handle dots.
+		if l.accept(dotPattern) {
+			dotConsumed = true
+			if l.accept(dotAntiPattern) {
+				return false
+			}
+			// Fractional hexadecimal literals are not allowed.
+			if len(digitPattern) > 10 /* 0x[\da-fA-F].[\d]+p[\d] */ {
+				return false
+			}
+			continue
+		}
+		// Handle exponents.
+		if l.accept(exponentPattern) {
+			exponentConsumed = true
+			l.accept("+-")
+			if l.accept(exponentAntiPattern) || l.peek() == eof {
+				return false
+			}
+			continue
+		}
+		// Handle underscores.
+		if l.accept(underscorePattern) {
+			if l.accept(underscoreAntiPattern) || l.peek() == eof {
+				return false
+			}
+
+			continue
+		}
+		// Handle digits at the end, since we already consumed some before this loop.
+		l.acceptRun(digitPattern)
 	}
+
 	// Next thing must not be alphanumeric unless it's the times token
 	// for series repetitions.
 	if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
diff --git a/promql/parser/lex_test.go b/promql/parser/lex_test.go
index f48c457c0c..ac9aa27625 100644
--- a/promql/parser/lex_test.go
+++ b/promql/parser/lex_test.go
@@ -132,6 +132,84 @@ var tests = []struct {
 		}, {
 			input:    "0x123",
 			expected: []Item{{NUMBER, 0, "0x123"}},
+		}, {
+			input: "1..2",
+			fail:  true,
+		}, {
+			input: "1.2.",
+			fail:  true,
+		}, {
+			input:    "00_1_23_4.56_7_8",
+			expected: []Item{{NUMBER, 0, "00_1_23_4.56_7_8"}},
+		}, {
+			input: "00_1_23__4.56_7_8",
+			fail:  true,
+		}, {
+			input: "00_1_23_4._56_7_8",
+			fail:  true,
+		}, {
+			input: "00_1_23_4_.56_7_8",
+			fail:  true,
+		}, {
+			input:    "0x1_2_34",
+			expected: []Item{{NUMBER, 0, "0x1_2_34"}},
+		}, {
+			input: "0x1_2__34",
+			fail:  true,
+		}, {
+			input: "0x1_2__34.5_6p1", // "0x1.1p1"-based formats are not supported yet.
+ fail: true, + }, { + input: "0x1_2__34.5_6", + fail: true, + }, { + input: "0x1_2__34.56", + fail: true, + }, { + input: "1_e2", + fail: true, + }, { + input: "1.e2", + expected: []Item{{NUMBER, 0, "1.e2"}}, + }, { + input: "1e.2", + fail: true, + }, { + input: "1e+.2", + fail: true, + }, { + input: "1ee2", + fail: true, + }, { + input: "1e+e2", + fail: true, + }, { + input: "1e", + fail: true, + }, { + input: "1e+", + fail: true, + }, { + input: "1e1_2_34", + expected: []Item{{NUMBER, 0, "1e1_2_34"}}, + }, { + input: "1e_1_2_34", + fail: true, + }, { + input: "1e1_2__34", + fail: true, + }, { + input: "1e+_1_2_34", + fail: true, + }, { + input: "1e-_1_2_34", + fail: true, + }, { + input: "12_", + fail: true, + }, { + input: "_1_2", + expected: []Item{{IDENTIFIER, 0, "_1_2"}}, }, }, }, diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 2679336d6c..6f73e2427b 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -447,6 +447,10 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE desiredArgs := 1 if ret.Op.IsAggregatorWithParam() { + if !EnableExperimentalFunctions && (ret.Op == LIMITK || ret.Op == LIMIT_RATIO) { + p.addParseErrf(ret.PositionRange(), "limitk() and limit_ratio() are experimental and must be enabled with --enable-feature=promql-experimental-functions") + return + } desiredArgs = 2 ret.Param = arguments[0] @@ -481,19 +485,19 @@ func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string } func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) { - return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { + return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) { return a.Add(b) }) } func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) { - return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram { + return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) { return a.Sub(b) }) } func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64, - combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram, + combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error), ) ([]SequenceValue, error) { ret := make([]SequenceValue, times+1) // Add an additional value (the base) for time 0, which we ignore in tests. @@ -504,7 +508,11 @@ func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uin return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema) } - cur = combine(cur.Copy(), inc) + var err error + cur, err = combine(cur.Copy(), inc) + if err != nil { + return ret, err + } ret[i] = SequenceValue{Histogram: cur} } @@ -562,6 +570,15 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram. 
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
 		}
 	}
+	val, ok = (*desc)["custom_values"]
+	if ok {
+		customValues, ok := val.([]float64)
+		if ok {
+			output.CustomValues = customValues
+		} else {
+			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing custom_values: %v", val)
+		}
+	}
 
 	buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
 	output.PositiveBuckets = buckets
@@ -659,7 +676,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 			p.addParseErrf(n.PositionRange(), "aggregation operator expected in aggregation expression but got %q", n.Op)
 		}
 		p.expectType(n.Expr, ValueTypeVector, "aggregation expression")
-		if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE {
+		if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE || n.Op == LIMITK || n.Op == LIMIT_RATIO {
 			p.expectType(n.Param, ValueTypeScalar, "aggregation parameter")
 		}
 		if n.Op == COUNT_VALUES {
diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go
index c56d845947..a4fe28e5b8 100644
--- a/promql/parser/parse_test.go
+++ b/promql/parser/parse_test.go
@@ -513,12 +513,12 @@ var testExpr = []struct {
 	{
 		input:  "2.5.",
 		fail:   true,
-		errMsg: "unexpected character: '.'",
+		errMsg: `1:1: parse error: bad number or duration syntax: "2.5."`,
 	},
 	{
 		input:  "100..4",
 		fail:   true,
-		errMsg: `unexpected number ".4"`,
+		errMsg: `1:1: parse error: bad number or duration syntax: "100.."`,
 	},
 	{
 		input: "0deadbeef",
diff --git a/promql/parser/printer.go b/promql/parser/printer.go
index ff171f2152..f3bdefdeb1 100644
--- a/promql/parser/printer.go
+++ b/promql/parser/printer.go
@@ -204,8 +204,8 @@ func (node *VectorSelector) String() string {
 		labelStrings = make([]string, 0, len(node.LabelMatchers)-1)
 	}
 	for _, matcher := range node.LabelMatchers {
-		// Only include the __name__ label if its equality matching and matches the name.
+		// Only skip the __name__ label if it's an equality match that matches the name, but don't skip if it's an explicit empty name matcher.
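+		// An explicit empty name matcher must survive printing: as covered by
+		// TestVectorSelector_String, {__name__="",a="x"} prints as-is rather
+		// than being collapsed to {a="x"}.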
+ if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name && matcher.Value != "" { continue } labelStrings = append(labelStrings, matcher.String()) diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go index a044b6969c..d2e301a884 100644 --- a/promql/parser/printer_test.go +++ b/promql/parser/printer_test.go @@ -135,6 +135,26 @@ func TestExprString(t *testing.T) { { in: `a[1m] @ end()`, }, + { + in: `{__name__="",a="x"}`, + }, + { + in: `{"a.b"="c"}`, + }, + { + in: `{"0"="1"}`, + }, + { + in: `{"_0"="1"}`, + out: `{_0="1"}`, + }, + { + in: `{""="0"}`, + }, + { + in: "{``=\"0\"}", + out: `{""="0"}`, + }, } for _, test := range inputs { @@ -216,6 +236,16 @@ func TestVectorSelector_String(t *testing.T) { }, expected: `{__name__="foobar"}`, }, + { + name: "empty name matcher", + vs: VectorSelector{ + LabelMatchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, ""), + labels.MustNewMatcher(labels.MatchEqual, "a", "x"), + }, + }, + expected: `{__name__="",a="x"}`, + }, } { t.Run(tc.name, func(t *testing.T) { require.Equal(t, tc.expected, tc.vs.String()) diff --git a/promql/promql_test.go b/promql/promql_test.go index 05821b1c11..a423f90ee8 100644 --- a/promql/promql_test.go +++ b/promql/promql_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package promql +package promql_test import ( "context" @@ -22,37 +22,33 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/util/teststorage" ) -func newTestEngine() *Engine { - return NewEngine(EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10000, - Timeout: 100 * time.Second, - NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) }, - EnableAtModifier: true, - EnableNegativeOffset: true, - EnablePerStepStats: true, - }) +func newTestEngine() *promql.Engine { + return promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery) } func TestEvaluations(t *testing.T) { - RunBuiltinTests(t, newTestEngine()) + promqltest.RunBuiltinTests(t, newTestEngine()) } // Run a lot of queries at the same time, to check for race conditions. func TestConcurrentRangeQueries(t *testing.T) { stor := teststorage.New(t) defer stor.Close() - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 50000000, Timeout: 100 * time.Second, } - engine := NewEngine(opts) + // Enable experimental functions testing + parser.EnableExperimentalFunctions = true + engine := promql.NewEngine(opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps. diff --git a/promql/promqltest/README.md b/promql/promqltest/README.md new file mode 100644 index 0000000000..af34354241 --- /dev/null +++ b/promql/promqltest/README.md @@ -0,0 +1,132 @@ +# The PromQL test scripting language + +This package contains two things: + +* an implementation of a test scripting language for PromQL engines +* a predefined set of tests written in that scripting language + +The predefined set of tests can be run against any PromQL engine implementation by calling `promqltest.RunBuiltinTests()`. +Any other test script can be run with `promqltest.RunTest()`. + +The rest of this document explains the test scripting language. 
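+
+For example, a Go test that runs the predefined suite could look like the sketch below. It mirrors the `promql_test.go` changes in this diff; the test name is illustrative, and the `NewTestEngine` arguments shown are the defaults used there.
+
+```go
+package promql_test
+
+import (
+	"testing"
+
+	"github.com/prometheus/prometheus/promql/promqltest"
+)
+
+// TestBuiltinEvaluations runs the predefined test scripts against a
+// test engine constructed by the promqltest package.
+func TestBuiltinEvaluations(t *testing.T) {
+	engine := promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery)
+	promqltest.RunBuiltinTests(t, engine)
+}
+```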
+
+Each test script is written in plain text.
+
+Comments are prefixed with a `#`, for example:
+
+```
+# This is a comment.
+```
+
+Each test file contains a series of commands. There are three kinds of commands:
+
+* `load`
+* `clear`
+* `eval`
+
+Each command is executed in the order given in the file.
+
+## `load` command
+
+`load` adds some data to the test environment.
+
+The syntax is as follows:
+
+```
+load <step>
+  <series> <points>
+  ...
+  <series> <points>
+```
+
+* `<step>` is the step between points (e.g. `1m` or `30s`)
+* `<series>` is a Prometheus series name in the usual `metric{label="value"}` syntax
+* `<points>` is a specification of the points to add for that series, following the same expanding syntax as for `promtool unittest` documented [here](../../docs/configuration/unit_testing_rules.md#series)
+
+For example:
+
+```
+load 1m
+  my_metric{env="prod"} 5 2+3x2 _ stale {{schema:1 sum:-3 count:22 buckets:[5 10 7]}}
+```
+
+...will create a single series with labels `my_metric{env="prod"}`, with the following points:
+
+* t=0: value is 5
+* t=1m: value is 2
+* t=2m: value is 5
+* t=3m: value is 8
+* t=4m: no point
+* t=5m: stale marker
+* t=6m: native histogram with schema 1, sum -3, count 22 and bucket counts 5, 10 and 7
+
+Each `load` command is additive: it does not replace any data loaded in a previous `load` command.
+Use `clear` to remove all loaded data.
+
+### Native histograms with custom buckets (NHCB)
+
+When loading a batch of classic histogram float series, you can optionally append the suffix `_with_nhcb` to convert them to native histograms with custom buckets and load both the original float series and the new histogram series.
+
+## `clear` command
+
+`clear` removes all data previously loaded with `load` commands.
+
+## `eval` command
+
+`eval` runs a query against the test environment and asserts that the result is as expected.
+
+Both instant and range queries are supported.
+
+The syntax is as follows:
+
+```
+# Instant query
+eval instant at <time> <expr>