Mirror of https://github.com/prometheus/prometheus.git (synced 2026-05-16 01:56:41 +02:00)
Merge branch 'main' into krajo/duration-arithmetic
commit 623f4e02cc

.github/workflows/buf-lint.yml (vendored): 2 changed lines

@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0
+      - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

.github/workflows/buf.yml (vendored): 2 changed lines

@@ -13,7 +13,7 @@ jobs:
     if: github.repository_owner == 'prometheus'
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0
+      - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

.github/workflows/ci.yml (vendored): 8 changed lines

@@ -80,7 +80,7 @@ jobs:
     runs-on: windows-latest
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+      - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           go-version: 1.23.x
       - run: |
@@ -171,7 +171,7 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
-        uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+        uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           cache: false
           go-version: 1.23.x
@@ -184,14 +184,14 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
-        uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
+        uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           go-version: 1.23.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
+        uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.

.github/workflows/codeql-analysis.yml (vendored): 6 changed lines

@@ -27,12 +27,12 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
+        uses: github/codeql-action/init@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
+        uses: github/codeql-action/autobuild@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
+        uses: github/codeql-action/analyze@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8

.github/workflows/fuzzing.yml (vendored): 2 changed lines

@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
+        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts

.github/workflows/scorecards.yml (vendored): 4 changed lines

@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # tag=v4.5.0
+        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # tag=v4.6.0
         with:
           name: SARIF file
           path: results.sarif
@@ -45,6 +45,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # tag=v3.28.0
+        uses: github/codeql-action/upload-sarif@dd746615b3b9d728a6a37ca2045b68ca76d4841a # tag=v3.28.8
         with:
           sarif_file: results.sarif

.github/workflows/stale.yml (vendored): 2 changed lines

@@ -11,7 +11,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
+      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           # opt out of defaults to avoid marking issues as stale and closing them
@@ -47,7 +47,19 @@ _Notes about the experimental native histograms:_
|
||||
disabling the feature flag again), both instant vectors and range vectors may
|
||||
now contain samples that aren't simple floating point numbers (float samples)
|
||||
but complete histograms (histogram samples). A vector may contain a mix of
|
||||
float samples and histogram samples.
|
||||
float samples and histogram samples. Note that the term “histogram sample” in
|
||||
the PromQL documentation always refers to a native histogram. Classic
|
||||
histograms are broken up into a number of series of float samples. From the
|
||||
perspective of PromQL, there are no “classic histogram samples”.
|
||||
* Like float samples, histogram samples can be counters or gauges, also called
|
||||
counter histograms or gauge histograms, respectively.
|
||||
* Native histograms can have different bucket layouts, but they are generally
|
||||
convertible to compatible versions to apply binary and aggregation operations
|
||||
to them. This is not true for all bucketing schemas. If incompatible
|
||||
histograms are encountered in an operation, the corresponding output vector
|
||||
element is removed from the result, flagged with a warn-level annotation.
|
||||
More details can be found in the
|
||||
[native histogram specification](https://prometheus.io/docs/specs/native_histograms/#compatibility-between-histograms).
|
||||
|
||||
## Literals
|
||||
|
||||
|
||||
@@ -11,30 +11,17 @@ instant-vector)`. This means that there is one argument `v` which is an instant
|
||||
vector, which if not provided it will default to the value of the expression
|
||||
`vector(time())`.
|
||||
|
||||
_Notes about the experimental native histograms:_
|
||||
|
||||
* Ingesting native histograms has to be enabled via a [feature
|
||||
flag](../feature_flags.md#native-histograms). As long as no native histograms
|
||||
have been ingested into the TSDB, all functions will behave as usual.
|
||||
* Functions that do not explicitly mention native histograms in their
|
||||
documentation (see below) will ignore histogram samples.
|
||||
* Functions that do already act on native histograms might still change their
|
||||
behavior in the future.
|
||||
* If a function requires the same bucket layout between multiple native
|
||||
histograms it acts on, it will automatically convert them
|
||||
appropriately. (With the currently supported bucket schemas, that's always
|
||||
possible.)
|
||||
|
||||
## `abs()`
|
||||
|
||||
`abs(v instant-vector)` returns the input vector with all sample values converted to
|
||||
their absolute value.
|
||||
`abs(v instant-vector)` returns a vector containing all float samples in the
|
||||
input vector converted to their absolute value. Histogram samples in the input
|
||||
vector are ignored silently.
|
||||
|
||||
## `absent()`
|
||||
|
||||
`absent(v instant-vector)` returns an empty vector if the vector passed to it
|
||||
has any elements (floats or native histograms) and a 1-element vector with the
|
||||
value 1 if the vector passed to it has no elements.
|
||||
has any elements (float samples or histogram samples) and a 1-element vector
|
||||
with the value 1 if the vector passed to it has no elements.
|
||||
|
||||
This is useful for alerting on when no time series exist for a given metric name
|
||||
and label combination.
|
||||
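For illustration, the following expression (the metric name `nonexistent_metric` is a hypothetical example that is assumed not to exist) returns a 1-element vector with the value 1 as long as no matching series exists, which is a common alerting pattern:

```
absent(nonexistent_metric{job="myjob"})
```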
@@ -56,8 +43,9 @@ of the 1-element output vector from the input vector.
|
||||
## `absent_over_time()`
|
||||
|
||||
`absent_over_time(v range-vector)` returns an empty vector if the range vector
|
||||
passed to it has any elements (floats or native histograms) and a 1-element
|
||||
vector with the value 1 if the range vector passed to it has no elements.
|
||||
passed to it has any elements (float samples or histogram samples) and a
|
||||
1-element vector with the value 1 if the range vector passed to it has no
|
||||
elements.
|
||||
|
||||
This is useful for alerting on when no time series exist for a given metric name
|
||||
and label combination for a certain amount of time.
|
||||
@@ -78,8 +66,9 @@ labels of the 1-element output vector from the input vector.
|
||||
|
||||
## `ceil()`
|
||||
|
||||
`ceil(v instant-vector)` rounds the sample values of all elements in `v` up to
|
||||
the nearest integer value greater than or equal to v.
|
||||
`ceil(v instant-vector)` returns a vector containing all float samples in the
|
||||
input vector rounded up to the nearest integer value greater than or equal to
|
||||
their original value. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
* `ceil(+Inf) = +Inf`
|
||||
* `ceil(±0) = ±0`
|
||||
@@ -90,49 +79,62 @@ the nearest integer value greater than or equal to v.
|
||||
|
||||
For each input time series, `changes(v range-vector)` returns the number of
|
||||
times its value has changed within the provided time range as an instant
|
||||
vector.
|
||||
vector. A float sample followed by a histogram sample, or vice versa, counts as
|
||||
a change. A counter histogram sample followed by a gauge histogram sample with
|
||||
otherwise exactly the same values, or vice versa, does not count as a change.
|
||||
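For example, the following sketch (using the standard process-collector gauge `process_open_fds`; any gauge would do) returns, per series, how often the value changed during the last hour:

```
changes(process_open_fds[1h])
```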
|
||||
## `clamp()`
|
||||
|
||||
`clamp(v instant-vector, min scalar, max scalar)`
|
||||
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.
|
||||
`clamp(v instant-vector, min scalar, max scalar)` clamps the values of all
|
||||
float samples in `v` to have a lower limit of `min` and an upper limit of
|
||||
`max`. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
Special cases:
|
||||
|
||||
* Return an empty vector if `min > max`
|
||||
* Return `NaN` if `min` or `max` is `NaN`
|
||||
* Float samples are clamped to `NaN` if `min` or `max` is `NaN`
|
||||
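A minimal sketch, assuming a hypothetical gauge named `node_temperature_celsius`, that limits every float sample to the range 0 to 100:

```
clamp(node_temperature_celsius, 0, 100)
```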
|
||||
## `clamp_max()`
|
||||
|
||||
`clamp_max(v instant-vector, max scalar)` clamps the sample values of all
|
||||
elements in `v` to have an upper limit of `max`.
|
||||
`clamp_max(v instant-vector, max scalar)` clamps the values of all float
|
||||
samples in `v` to have an upper limit of `max`. Histogram samples in the input
|
||||
vector are ignored silently.
|
||||
|
||||
## `clamp_min()`
|
||||
|
||||
`clamp_min(v instant-vector, min scalar)` clamps the sample values of all
|
||||
elements in `v` to have a lower limit of `min`.
|
||||
`clamp_min(v instant-vector, min scalar)` clamps the values of all float
|
||||
samples in `v` to have a lower limit of `min`. Histogram samples in the input
|
||||
vector are ignored silently.
|
||||
|
||||
## `day_of_month()`
|
||||
|
||||
`day_of_month(v=vector(time()) instant-vector)` returns the day of the month
|
||||
for each of the given times in UTC. Returned values are from 1 to 31.
|
||||
`day_of_month(v=vector(time()) instant-vector)` interprets float samples in
|
||||
`v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the
|
||||
day of the month (in UTC) for each of those timestamps. Returned values are
|
||||
from 1 to 31. Histogram samples in the input vector are ignored silently.
|
||||
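For illustration, both of the following forms are valid; the second assumes a hypothetical gauge whose float samples are Unix timestamps:

```
day_of_month()                                        # current evaluation time
day_of_month(my_batch_job_last_success_timestamp_seconds)  # timestamps stored in a gauge
```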
|
||||
## `day_of_week()`
|
||||
|
||||
`day_of_week(v=vector(time()) instant-vector)` returns the day of the week for
|
||||
each of the given times in UTC. Returned values are from 0 to 6, where 0 means
|
||||
Sunday etc.
|
||||
`day_of_week(v=vector(time()) instant-vector)` interprets float samples in `v`
|
||||
as timestamps (number of seconds since January 1, 1970 UTC) and returns the day
|
||||
of the week (in UTC) for each of those timestamps. Returned values are from 0
|
||||
to 6, where 0 means Sunday etc. Histogram samples in the input vector are
|
||||
ignored silently.
|
||||
|
||||
## `day_of_year()`
|
||||
|
||||
`day_of_year(v=vector(time()) instant-vector)` returns the day of the year for
|
||||
each of the given times in UTC. Returned values are from 1 to 365 for non-leap years,
|
||||
and 1 to 366 in leap years.
|
||||
`day_of_year(v=vector(time()) instant-vector)` interprets float samples in `v`
|
||||
as timestamps (number of seconds since January 1, 1970 UTC) and returns the day
|
||||
of the year (in UTC) for each of those timestamps. Returned values are from 1
|
||||
to 365 for non-leap years, and 1 to 366 in leap years. Histogram samples in the
|
||||
input vector are ignored silently.
|
||||
|
||||
## `days_in_month()`
|
||||
|
||||
`days_in_month(v=vector(time()) instant-vector)` returns number of days in the
|
||||
month for each of the given times in UTC. Returned values are from 28 to 31.
|
||||
`days_in_month(v=vector(time()) instant-vector)` interprets float samples in
|
||||
`v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the
|
||||
number of days in the month of each of those timestamps (in UTC). Returned
|
||||
values are from 28 to 31. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
## `delta()`
|
||||
|
||||
@@ -150,36 +152,67 @@ between now and 2 hours ago:
|
||||
delta(cpu_temp_celsius{host="zeus"}[2h])
|
||||
```
|
||||
|
||||
`delta` acts on native histograms by calculating a new histogram where each
|
||||
`delta` acts on histogram samples by calculating a new histogram where each
|
||||
component (sum and count of observations, buckets) is the difference between
|
||||
the respective component in the first and last native histogram in
|
||||
`v`. However, each element in `v` that contains a mix of float and native
|
||||
histogram samples within the range, will be missing from the result vector.
|
||||
the respective component in the first and last native histogram in `v`.
|
||||
However, each element in `v` that contains a mix of float samples and histogram
|
||||
samples within the range will be omitted from the result vector, flagged by a
|
||||
warn-level annotation.
|
||||
|
||||
`delta` should only be used with gauges and native histograms where the
|
||||
components behave like gauges (so-called gauge histograms).
|
||||
`delta` should only be used with gauges (for both floats and histograms).
|
||||
|
||||
## `deriv()`
|
||||
|
||||
`deriv(v range-vector)` calculates the per-second derivative of the time series in a range
|
||||
vector `v`, using [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
|
||||
The range vector must have at least two samples in order to perform the calculation. When `+Inf` or
|
||||
`-Inf` are found in the range vector, the slope and offset value calculated will be `NaN`.
|
||||
`deriv(v range-vector)` calculates the per-second derivative of each float time
|
||||
series in the range vector `v`, using [simple linear
|
||||
regression](https://en.wikipedia.org/wiki/Simple_linear_regression). The range
|
||||
vector must have at least two float samples in order to perform the
|
||||
calculation. When `+Inf` or `-Inf` are found in the range vector, the slope and
|
||||
offset value calculated will be `NaN`.
|
||||
|
||||
`deriv` should only be used with gauges.
|
||||
`deriv` should only be used with gauges and only works for float samples.
|
||||
Elements in the range vector that contain only histogram samples are ignored
|
||||
entirely. For elements that contain a mix of float and histogram samples, only
|
||||
the float samples are used as input, which is flagged by an info-level
|
||||
annotation.
|
||||
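As a sketch (the node_exporter gauge `node_filesystem_free_bytes` and the `job="node"` label are assumptions here), the following estimates the per-second slope of free disk space over the last hour:

```
deriv(node_filesystem_free_bytes{job="node"}[1h])
```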
|
||||
## `double_exponential_smoothing()`
|
||||
|
||||
**This function has to be enabled via the [feature
|
||||
flag](../feature_flags.md#experimental-promql-functions)
|
||||
`--enable-feature=promql-experimental-functions`.**
|
||||
|
||||
`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a
|
||||
smoothed value for each float time series in the range in `v`. The lower the
|
||||
smoothing factor `sf`, the more importance is given to old data. The higher the
|
||||
trend factor `tf`, the more trends in the data are considered. Both `sf` and
|
||||
`tf` must be between 0 and 1. For additional details, refer to [NIST
|
||||
Engineering Statistics
|
||||
Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm). In
|
||||
Prometheus V2 this function was called `holt_winters`. This caused confusion
|
||||
since the Holt-Winters method usually refers to triple exponential smoothing.
|
||||
Double exponential smoothing as implemented here is also referred to as "Holt
|
||||
Linear".
|
||||
|
||||
`double_exponential_smoothing` should only be used with gauges and only works
|
||||
for float samples. Elements in the range vector that contain only histogram
|
||||
samples are ignored entirely. For elements that contain a mix of float and
|
||||
histogram samples, only the float samples are used as input, which is flagged
|
||||
by an info-level annotation.
|
||||
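A hedged sketch (the feature flag above must be enabled; `demo_queue_length` is a hypothetical gauge) that smooths the last 10 minutes of float samples with a low smoothing factor and a small trend factor:

```
double_exponential_smoothing(demo_queue_length[10m], 0.3, 0.1)
```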
|
||||
## `exp()`
|
||||
|
||||
`exp(v instant-vector)` calculates the exponential function for all elements in `v`.
|
||||
Special cases are:
|
||||
`exp(v instant-vector)` calculates the exponential function for all float
|
||||
samples in `v`. Histogram samples are ignored silently. Special cases are:
|
||||
|
||||
* `Exp(+Inf) = +Inf`
|
||||
* `Exp(NaN) = NaN`
|
||||
|
||||
## `floor()`
|
||||
|
||||
`floor(v instant-vector)` rounds the sample values of all elements in `v` down
|
||||
to the nearest integer value smaller than or equal to v.
|
||||
`floor(v instant-vector)` returns a vector containing all float samples in the
|
||||
input vector rounded down to the nearest integer value smaller than or equal
|
||||
to their original value. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
* `floor(+Inf) = +Inf`
|
||||
* `floor(±0) = ±0`
|
||||
@@ -188,12 +221,8 @@ to the nearest integer value smaller than or equal to v.
|
||||
|
||||
## `histogram_avg()`
|
||||
|
||||
_This function only acts on native histograms, which are an experimental
|
||||
feature. The behavior of this function may change in future versions of
|
||||
Prometheus, including its removal from PromQL._
|
||||
|
||||
`histogram_avg(v instant-vector)` returns the arithmetic average of observed values stored in
|
||||
a native histogram. Samples that are not native histograms are ignored and do
|
||||
`histogram_avg(v instant-vector)` returns the arithmetic average of observed
|
||||
values stored in each histogram sample in `v`. Float samples are ignored and do
|
||||
not show up in the returned vector.
|
||||
|
||||
Use `histogram_avg` as demonstrated below to compute the average request duration
|
||||
@@ -209,32 +238,25 @@ Which is equivalent to the following query:
|
||||
|
||||
## `histogram_count()` and `histogram_sum()`
|
||||
|
||||
_Both functions only act on native histograms, which are an experimental
|
||||
feature. The behavior of these functions may change in future versions of
|
||||
Prometheus, including their removal from PromQL._
|
||||
|
||||
`histogram_count(v instant-vector)` returns the count of observations stored in
|
||||
a native histogram. Samples that are not native histograms are ignored and do
|
||||
not show up in the returned vector.
|
||||
each histogram sample in `v`. Float samples are ignored and do not show up in
|
||||
the returned vector.
|
||||
|
||||
Similarly, `histogram_sum(v instant-vector)` returns the sum of observations
|
||||
stored in a native histogram.
|
||||
stored in each histogram sample.
|
||||
|
||||
Use `histogram_count` in the following way to calculate a rate of observations
|
||||
(in this case corresponding to “requests per second”) from a native histogram:
|
||||
(in this case corresponding to “requests per second”) from a series of
|
||||
histogram samples:
|
||||
|
||||
histogram_count(rate(http_request_duration_seconds[10m]))
|
||||
|
||||
## `histogram_fraction()`
|
||||
|
||||
_This function only acts on native histograms, which are an experimental
|
||||
feature. The behavior of this function may change in future versions of
|
||||
Prometheus, including its removal from PromQL._
|
||||
|
||||
For a native histogram, `histogram_fraction(lower scalar, upper scalar, v
|
||||
instant-vector)` returns the estimated fraction of observations between the
|
||||
provided lower and upper values. Samples that are not native histograms are
|
||||
ignored and do not show up in the returned vector.
|
||||
`histogram_fraction(lower scalar, upper scalar, v instant-vector)` returns the
|
||||
estimated fraction of observations between the provided lower and upper values
|
||||
for each histogram sample in `v`. Float samples are ignored and do not show up
|
||||
in the returned vector.
|
||||
|
||||
For example, the following expression calculates the fraction of HTTP requests
|
||||
over the last hour that took 200ms or less:
|
||||
@@ -253,12 +275,13 @@ observations less than or equal 0.2 would be `-Inf` rather than `0`.
|
||||
Whether the provided boundaries are inclusive or exclusive is only relevant if
|
||||
the provided boundaries are precisely aligned with bucket boundaries in the
|
||||
underlying native histogram. In this case, the behavior depends on the schema
|
||||
definition of the histogram. The currently supported schemas all feature
|
||||
inclusive upper boundaries and exclusive lower boundaries for positive values
|
||||
(and vice versa for negative values). Without a precise alignment of
|
||||
boundaries, the function uses linear interpolation to estimate the
|
||||
fraction. With the resulting uncertainty, it becomes irrelevant if the
|
||||
boundaries are inclusive or exclusive.
|
||||
definition of the histogram. (The usual standard exponential schemas all
|
||||
feature inclusive upper boundaries and exclusive lower boundaries for positive
|
||||
values, and vice versa for negative values.) Without a precise alignment of
|
||||
boundaries, the function uses interpolation to estimate the fraction. With the
|
||||
resulting uncertainty, it becomes irrelevant if the boundaries are inclusive or
|
||||
exclusive. The interpolation method is the same as the one used for
|
||||
`histogram_quantile()`. See there for more details.
|
||||
|
||||
## `histogram_quantile()`
|
||||
|
||||
@@ -270,10 +293,6 @@ summaries](https://prometheus.io/docs/practices/histograms) for a detailed
|
||||
explanation of φ-quantiles and the usage of the (classic) histogram metric
|
||||
type in general.)
|
||||
|
||||
_Note that native histograms are an experimental feature. The behavior of this
|
||||
function when dealing with native histograms may change in future versions of
|
||||
Prometheus._
|
||||
|
||||
The float samples in `b` are considered the counts of observations in each
|
||||
bucket of one or more classic histograms. Each float sample must have a label
|
||||
`le` where the label value denotes the inclusive upper bound of the bucket.
|
||||
@@ -284,8 +303,8 @@ type](https://prometheus.io/docs/concepts/metric_types/#histogram)
|
||||
automatically provides time series with the `_bucket` suffix and the
|
||||
appropriate labels.
|
||||
|
||||
The native histogram samples in `b` are treated each individually as a separate
|
||||
histogram to calculate the quantile from.
|
||||
The (native) histogram samples in `b` are treated each individually as a
|
||||
separate histogram to calculate the quantile from.
|
||||
|
||||
As long as no naming collisions arise, `b` may contain a mix of classic
|
||||
and native histograms.
|
||||
@@ -336,7 +355,9 @@ non-zero-buckets of native histograms with a standard exponential bucketing
|
||||
schema, the interpolation is done under the assumption that the samples within
|
||||
the bucket are distributed in a way that they would uniformly populate the
|
||||
buckets in a hypothetical histogram with higher resolution. (This is also
|
||||
called _exponential interpolation_.)
|
||||
called _exponential interpolation_. See the [native histogram
|
||||
specification](https://prometheus.io/docs/specs/native_histograms/#interpolation-within-a-bucket)
|
||||
for more details.)
|
||||
|
||||
If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is
|
||||
returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.
|
||||
@@ -387,63 +408,46 @@ difference between two buckets is a trillionth (1e-12) of the sum of both
|
||||
buckets.) Furthermore, if there are non-monotonic bucket counts even after this
|
||||
adjustment, they are increased to the value of the previous buckets to enforce
|
||||
monotonicity. The latter is evidence for an actual issue with the input data
|
||||
and is therefore flagged with an informational annotation reading `input to
|
||||
and is therefore flagged by an info-level annotation reading `input to
|
||||
histogram_quantile needed to be fixed for monotonicity`. If you encounter this
|
||||
annotation, you should find and remove the source of the invalid data.
|
||||
|
||||
## `histogram_stddev()` and `histogram_stdvar()`
|
||||
|
||||
_Both functions only act on native histograms, which are an experimental
|
||||
feature. The behavior of these functions may change in future versions of
|
||||
Prometheus, including their removal from PromQL._
|
||||
|
||||
`histogram_stddev(v instant-vector)` returns the estimated standard deviation
|
||||
of observations in a native histogram, based on the geometric mean of the buckets
|
||||
where the observations lie. Samples that are not native histograms are ignored and
|
||||
do not show up in the returned vector.
|
||||
of observations for each histogram sample in `v`, based on the geometric mean
|
||||
of the buckets where the observations lie. Float samples are ignored and do not
|
||||
show up in the returned vector.
|
||||
|
||||
Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard
|
||||
variance of observations in a native histogram.
|
||||
|
||||
## `double_exponential_smoothing()`
|
||||
|
||||
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
|
||||
|
||||
`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a smoothed value
|
||||
for time series based on the range in `v`. The lower the smoothing factor `sf`,
|
||||
the more importance is given to old data. The higher the trend factor `tf`, the
|
||||
more trends in the data are considered. Both `sf` and `tf` must be between 0 and
|
||||
1.
|
||||
For additional details, refer to [NIST Engineering Statistics Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm).
|
||||
In Prometheus V2 this function was called `holt_winters`. This caused confusion
|
||||
since the Holt-Winters method usually refers to triple exponential smoothing.
|
||||
Double exponential smoothing as implemented here is also referred to as "Holt
|
||||
Linear".
|
||||
|
||||
`double_exponential_smoothing` should only be used with gauges.
|
||||
variance of observations for each histogram sample in `v`.
|
||||
|
||||
## `hour()`
|
||||
|
||||
`hour(v=vector(time()) instant-vector)` returns the hour of the day
|
||||
for each of the given times in UTC. Returned values are from 0 to 23.
|
||||
`hour(v=vector(time()) instant-vector)` interprets float samples in `v` as
|
||||
timestamps (number of seconds since January 1, 1970 UTC) and returns the hour
|
||||
of the day (in UTC) for each of those timestamps. Returned values are from 0
|
||||
to 23. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
## `idelta()`
|
||||
|
||||
`idelta(v range-vector)` calculates the difference between the last two samples
|
||||
in the range vector `v`, returning an instant vector with the given deltas and
|
||||
equivalent labels.
|
||||
equivalent labels. Both samples must be either float samples or histogram
|
||||
samples. Elements in `v` where one of the last two samples is a float sample
|
||||
and the other is a histogram sample will be omitted from the result vector,
|
||||
flagged by a warn-level annotation.
|
||||
|
||||
`idelta` should only be used with gauges.
|
||||
`idelta` should only be used with gauges (for both floats and histograms).
|
||||
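For example, reusing the hypothetical gauge from the `delta` example above, the following returns the difference between the two most recent samples within the last five minutes:

```
idelta(cpu_temp_celsius{host="zeus"}[5m])
```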
|
||||
## `increase()`
|
||||
|
||||
`increase(v range-vector)` calculates the increase in the
|
||||
time series in the range vector. Breaks in monotonicity (such as counter
|
||||
resets due to target restarts) are automatically adjusted for. The
|
||||
increase is extrapolated to cover the full time range as specified
|
||||
in the range vector selector, so that it is possible to get a
|
||||
non-integer result even if a counter increases only by integer
|
||||
increments.
|
||||
`increase(v range-vector)` calculates the increase in the time series in the
|
||||
range vector. Breaks in monotonicity (such as counter resets due to target
|
||||
restarts) are automatically adjusted for. The increase is extrapolated to cover
|
||||
the full time range as specified in the range vector selector, so that it is
|
||||
possible to get a non-integer result even if a counter increases only by
|
||||
integer increments.
|
||||
|
||||
The following example expression returns the number of HTTP requests as measured
|
||||
over the last 5 minutes, per time series in the range vector:
|
||||
@@ -452,19 +456,20 @@ over the last 5 minutes, per time series in the range vector:
|
||||
increase(http_requests_total{job="api-server"}[5m])
|
||||
```
|
||||
|
||||
`increase` acts on native histograms by calculating a new histogram where each
|
||||
component (sum and count of observations, buckets) is the increase between
|
||||
the respective component in the first and last native histogram in
|
||||
`v`. However, each element in `v` that contains a mix of float and native
|
||||
histogram samples within the range, will be missing from the result vector.
|
||||
`increase` acts on histogram samples by calculating a new histogram where each
|
||||
component (sum and count of observations, buckets) is the increase between the
|
||||
respective component in the first and last native histogram in `v`. However,
|
||||
each element in `v` that contains a mix of float samples and histogram samples
|
||||
within the range will be omitted from the result vector, flagged by a
|
||||
warn-level annotation.
|
||||
|
||||
`increase` should only be used with counters and native histograms where the
|
||||
components behave like counters. It is syntactic sugar for `rate(v)` multiplied
|
||||
by the number of seconds under the specified time range window, and should be
|
||||
used primarily for human readability. Use `rate` in recording rules so that
|
||||
increases are tracked consistently on a per-second basis.
|
||||
`increase` should only be used with counters (for both floats and histograms).
|
||||
It is syntactic sugar for `rate(v)` multiplied by the number of seconds under
|
||||
the specified time range window, and should be used primarily for human
|
||||
readability. Use `rate` in recording rules so that increases are tracked
|
||||
consistently on a per-second basis.
|
||||
|
||||
## `info()` (experimental)
|
||||
## `info()`
|
||||
|
||||
_The `info` function is an experiment to improve UX
|
||||
around including labels from [info metrics](https://grafana.com/blog/2021/08/04/how-to-use-promql-joins-for-more-effective-queries-of-prometheus-metrics-at-scale/#info-metrics).
|
||||
@@ -560,7 +565,12 @@ consider all matching info series and with their appropriate identifying labels.
|
||||
`irate(v range-vector)` calculates the per-second instant rate of increase of
|
||||
the time series in the range vector. This is based on the last two data points.
|
||||
Breaks in monotonicity (such as counter resets due to target restarts) are
|
||||
automatically adjusted for.
|
||||
automatically adjusted for. Both samples must be either float samples or
|
||||
histogram samples. Elements in `v` where one of the last two samples is a float
|
||||
sample and the other is a histogram sample will be omitted from the result
|
||||
vector, flagged by a warn-level annotation.
|
||||
|
||||
`irate` should only be used with counters (for both floats and histograms).
|
||||
|
||||
The following example expression returns the per-second rate of HTTP requests
|
||||
looking up to 5 minutes back for the two most recent data points, per time
|
||||
@@ -618,8 +628,8 @@ label_replace(up{job="api-server",service="a:c"}, "foo", "$name", "service", "(?
|
||||
|
||||
## `ln()`
|
||||
|
||||
`ln(v instant-vector)` calculates the natural logarithm for all elements in `v`.
|
||||
Special cases are:
|
||||
`ln(v instant-vector)` calculates the natural logarithm for all float samples
|
||||
in `v`. Histogram samples in the input vector are ignored silently. Special cases are:
|
||||
|
||||
* `ln(+Inf) = +Inf`
|
||||
* `ln(0) = -Inf`
|
||||
@@ -628,35 +638,45 @@ Special cases are:
|
||||
|
||||
## `log2()`
|
||||
|
||||
`log2(v instant-vector)` calculates the binary logarithm for all elements in `v`.
|
||||
The special cases are equivalent to those in `ln`.
|
||||
`log2(v instant-vector)` calculates the binary logarithm for all float samples
|
||||
in `v`. Histogram samples in the input vector are ignored silently. The special cases
|
||||
are equivalent to those in `ln`.
|
||||
|
||||
## `log10()`
|
||||
|
||||
`log10(v instant-vector)` calculates the decimal logarithm for all elements in `v`.
|
||||
The special cases are equivalent to those in `ln`.
|
||||
`log10(v instant-vector)` calculates the decimal logarithm for all float
|
||||
samples in `v`. Histogram samples in the input vector are ignored silently. The special
|
||||
cases are equivalent to those in `ln`.
|
||||
|
||||
## `minute()`
|
||||
|
||||
`minute(v=vector(time()) instant-vector)` returns the minute of the hour for each
|
||||
of the given times in UTC. Returned values are from 0 to 59.
|
||||
`minute(v=vector(time()) instant-vector)` interprets float samples in `v` as
|
||||
timestamps (number of seconds since January 1, 1970 UTC) and returns the minute
|
||||
of the hour (in UTC) for each of those timestamps. Returned values are from 0
|
||||
to 59. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
## `month()`
|
||||
|
||||
`month(v=vector(time()) instant-vector)` returns the month of the year for each
|
||||
of the given times in UTC. Returned values are from 1 to 12, where 1 means
|
||||
January etc.
|
||||
`month(v=vector(time()) instant-vector)` interprets float samples in `v` as
|
||||
timestamps (number of seconds since January 1, 1970 UTC) and returns the month
|
||||
of the year (in UTC) for each of those timestamps. Returned values are from 1
|
||||
to 12, where 1 means January etc. Histogram samples in the input vector are
|
||||
ignored silently.
|
||||
|
||||
## `predict_linear()`
|
||||
|
||||
`predict_linear(v range-vector, t scalar)` predicts the value of time series
|
||||
`t` seconds from now, based on the range vector `v`, using [simple linear
|
||||
regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
|
||||
The range vector must have at least two samples in order to perform the
|
||||
calculation. When `+Inf` or `-Inf` are found in the range vector,
|
||||
the slope and offset value calculated will be `NaN`.
|
||||
regression](https://en.wikipedia.org/wiki/Simple_linear_regression). The range
|
||||
vector must have at least two float samples in order to perform the
|
||||
calculation. When `+Inf` or `-Inf` are found in the range vector, the predicted
|
||||
value will be `NaN`.
|
||||
|
||||
`predict_linear` should only be used with gauges.
|
||||
`predict_linear` should only be used with gauges and only works for float
|
||||
samples. Elements in the range vector that contain only histogram samples are
|
||||
ignored entirely. For elements that contain a mix of float and histogram
|
||||
samples, only the float samples are used as input, which is flagged by an
|
||||
info-level annotation.
|
||||
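As a sketch (metric and labels follow node_exporter conventions and are assumptions here), the following predicts the free filesystem space four hours from now, based on the last hour of float samples:

```
predict_linear(node_filesystem_free_bytes{job="node"}[1h], 4 * 3600)
```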
|
||||
## `rate()`
|
||||
|
||||
@@ -666,7 +686,7 @@ resets due to target restarts) are automatically adjusted for. Also, the
|
||||
calculation extrapolates to the ends of the time range, allowing for missed
|
||||
scrapes or imperfect alignment of scrape cycles with the range's time period.
|
||||
|
||||
The following example expression returns the per-second rate of HTTP requests as measured
|
||||
The following example expression returns the per-second average rate of HTTP requests
|
||||
over the last 5 minutes, per time series in the range vector:
|
||||
|
||||
```
|
||||
@@ -675,13 +695,13 @@ rate(http_requests_total{job="api-server"}[5m])
|
||||
|
||||
`rate` acts on native histograms by calculating a new histogram where each
|
||||
component (sum and count of observations, buckets) is the rate of increase
|
||||
between the respective component in the first and last native histogram in
|
||||
`v`. However, each element in `v` that contains a mix of float and native
|
||||
histogram samples within the range, will be missing from the result vector.
|
||||
between the respective component in the first and last native histogram in `v`.
|
||||
However, each element in `v` that contains a mix of float and native histogram
|
||||
samples within the range will be omitted from the result vector, flagged by a
|
||||
warn-level annotation.
|
||||
|
||||
`rate` should only be used with counters and native histograms where the
|
||||
components behave like counters. It is best suited for alerting, and for
|
||||
graphing of slow-moving counters.
|
||||
`rate` should only be used with counters (for both floats and histograms). It
|
||||
is best suited for alerting, and for graphing of slow-moving counters.
|
||||
|
||||
Note that when combining `rate()` with an aggregation operator (e.g. `sum()`)
|
||||
or a function aggregating over time (any function ending in `_over_time`),
|
||||
@@ -696,17 +716,15 @@ decrease in the value between two consecutive float samples is interpreted as a
|
||||
counter reset. A reset in a native histogram is detected in a more complex way:
|
||||
Any decrease in any bucket, including the zero bucket, or in the count of
|
||||
observation constitutes a counter reset, but also the disappearance of any
|
||||
previously populated bucket, an increase in bucket resolution, or a decrease of
|
||||
the zero-bucket width.
|
||||
previously populated bucket, a decrease of the zero-bucket width, or any schema
|
||||
change that is not a compatible decrease of resolution.
|
||||
|
||||
`resets` should only be used with counters and counter-like native
|
||||
histograms.
|
||||
`resets` should only be used with counters (for both floats and histograms).
|
||||
|
||||
If the range vector contains a mix of float and histogram samples for the same
|
||||
series, counter resets are detected separately and their numbers added up. The
|
||||
change from a float to a histogram sample is _not_ considered a counter
|
||||
reset. Each float sample is compared to the next float sample, and each
|
||||
histogram is compared to the next histogram.
|
||||
A float sample followed by a histogram sample, or vice versa, counts as a
|
||||
reset. A counter histogram sample followed by a gauge histogram sample, or vice
|
||||
versa, also counts as a reset (but note that `resets` should not be used on
|
||||
gauges in the first place, see above).
|
||||
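For example, using the counter from the `increase` example above, the following returns the number of counter resets detected per series over the last hour:

```
resets(http_requests_total{job="api-server"}[1h])
```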
|
||||
## `round()`
|
||||
|
||||
@@ -714,53 +732,63 @@ histogram is comprared to the next histogram.
|
||||
elements in `v` to the nearest integer. Ties are resolved by rounding up. The
|
||||
optional `to_nearest` argument allows specifying the nearest multiple to which
|
||||
the sample values should be rounded. This multiple may also be a fraction.
|
||||
Histogram samples in the input vector are ignored silently.
|
||||
|
||||
## `scalar()`
|
||||
|
||||
Given a single-element input vector, `scalar(v instant-vector)` returns the
|
||||
sample value of that single element as a scalar. If the input vector does not
|
||||
have exactly one element, `scalar` will return `NaN`.
|
||||
Given an input vector that contains only one element with a float sample,
|
||||
`scalar(v instant-vector)` returns the sample value of that float sample as a
|
||||
scalar. If the input vector does not have exactly one element with a float
|
||||
sample, `scalar` will return `NaN`. Histogram samples in the input vector are
|
||||
ignored silently.
|
||||
|
||||
## `sgn()`
|
||||
|
||||
`sgn(v instant-vector)` returns a vector with all sample values converted to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0 if v is equal to zero.
|
||||
`sgn(v instant-vector)` returns a vector with all float sample values converted
|
||||
to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0
|
||||
if v is equal to zero. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
## `sort()`
|
||||
|
||||
`sort(v instant-vector)` returns vector elements sorted by their sample values,
|
||||
in ascending order. Native histograms are sorted by their sum of observations.
|
||||
`sort(v instant-vector)` returns vector elements sorted by their float sample
|
||||
values, in ascending order. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering.
|
||||
Please note that `sort` only affects the results of instant queries, as range
|
||||
query results always have a fixed output ordering.
|
||||
|
||||
## `sort_desc()`
|
||||
|
||||
Same as `sort`, but sorts in descending order.
|
||||
|
||||
Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering.
|
||||
|
||||
## `sort_by_label()`
|
||||
|
||||
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
|
||||
**This function has to be enabled via the [feature
|
||||
flag](../feature_flags.md#experimental-promql-functions)
|
||||
`--enable-feature=promql-experimental-functions`.**
|
||||
|
||||
`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets.
|
||||
`sort_by_label(v instant-vector, label string, ...)` returns vector elements
|
||||
sorted by the values of the given labels in ascending order. In case these
|
||||
label values are equal, elements are sorted by their full label sets.
|
||||
`sort_by_label` acts on float and histogram samples in the same way.
|
||||
|
||||
Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering.
|
||||
Please note that `sort_by_label` only affects the results of instant queries, as
|
||||
range query results always have a fixed output ordering.
|
||||
|
||||
This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order).
|
||||
`sort_by_label` uses [natural sort
|
||||
order](https://en.wikipedia.org/wiki/Natural_sort_order).
|
||||
|
||||
## `sort_by_label_desc()`
|
||||
|
||||
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
|
||||
**This function has to be enabled via the [feature
|
||||
flag](../feature_flags.md#experimental-promql-functions)
|
||||
`--enable-feature=promql-experimental-functions`.**
|
||||
|
||||
Same as `sort_by_label`, but sorts in descending order.
|
||||
|
||||
Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering.
|
||||
|
||||
This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order).
|
||||
|
||||
## `sqrt()`
|
||||
|
||||
`sqrt(v instant-vector)` calculates the square root of all elements in `v`.
|
||||
`sqrt(v instant-vector)` calculates the square root of all float samples in
|
||||
`v`. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
## `time()`
|
||||
|
||||
@@ -771,66 +799,80 @@ expression is to be evaluated.
|
||||
## `timestamp()`
|
||||
|
||||
`timestamp(v instant-vector)` returns the timestamp of each of the samples of
|
||||
the given vector as the number of seconds since January 1, 1970 UTC. It also
|
||||
works with histogram samples.
|
||||
the given vector as the number of seconds since January 1, 1970 UTC. It acts on
|
||||
float and histogram samples in the same way.
|
||||
|
||||
## `vector()`
|
||||
|
||||
`vector(s scalar)` returns the scalar `s` as a vector with no labels.
|
||||
`vector(s scalar)` converts the scalar `s` to a float sample and returns it as
|
||||
a single-element instant vector with no labels.
|
||||
|
||||
## `year()`
|
||||
|
||||
`year(v=vector(time()) instant-vector)` returns the year
|
||||
for each of the given times in UTC.
|
||||
`year(v=vector(time()) instant-vector)` returns the year for each of the given
|
||||
times in UTC. Histogram samples in the input vector are ignored silently.
|
||||
|
||||
## `<aggregation>_over_time()`
|
||||
|
||||
The following functions allow aggregating each series of a given range vector
|
||||
over time and return an instant vector with per-series aggregation results:
|
||||
|
||||
* `avg_over_time(range-vector)`: the average value of all points in the specified interval.
|
||||
* `min_over_time(range-vector)`: the minimum value of all points in the specified interval.
|
||||
* `max_over_time(range-vector)`: the maximum value of all points in the specified interval.
|
||||
* `sum_over_time(range-vector)`: the sum of all values in the specified interval.
|
||||
* `count_over_time(range-vector)`: the count of all values in the specified interval.
|
||||
* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval.
|
||||
* `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval.
|
||||
* `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval.
|
||||
* `last_over_time(range-vector)`: the most recent point value in the specified interval.
|
||||
* `avg_over_time(range-vector)`: the average value of all float or histogram samples in the specified interval (see details below).
|
||||
* `min_over_time(range-vector)`: the minimum value of all float samples in the specified interval.
|
||||
* `max_over_time(range-vector)`: the maximum value of all float samples in the specified interval.
|
||||
* `sum_over_time(range-vector)`: the sum of all float or histogram samples in the specified interval (see details below).
|
||||
* `count_over_time(range-vector)`: the count of all samples in the specified interval.
|
||||
* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the specified interval.
|
||||
* `stddev_over_time(range-vector)`: the population standard deviation of all float samples in the specified interval.
|
||||
* `stdvar_over_time(range-vector)`: the population standard variance of all float samples in the specified interval.
|
||||
* `last_over_time(range-vector)`: the most recent sample in the specified interval.
|
||||
* `present_over_time(range-vector)`: the value 1 for any series in the specified interval.
|
||||
|
||||
If the [feature flag](../feature_flags.md#experimental-promql-functions)
|
||||
`--enable-feature=promql-experimental-functions` is set, the following
|
||||
additional functions are available:
|
||||
|
||||
* `mad_over_time(range-vector)`: the median absolute deviation of all points in the specified interval.
|
||||
* `mad_over_time(range-vector)`: the median absolute deviation of all float
|
||||
samples in the specified interval.
|
||||
|
||||
Note that all values in the specified interval have the same weight in the
|
||||
aggregation even if the values are not equally spaced throughout the interval.
|
||||
|
||||
`avg_over_time`, `sum_over_time`, `count_over_time`, `last_over_time`, and
|
||||
`present_over_time` handle native histograms as expected. All other functions
|
||||
ignore histogram samples.
|
||||
These functions act on histograms in the following way:
|
||||
|
||||
- `count_over_time`, `last_over_time`, and `present_over_time()` act on float
|
||||
and histogram samples in the same way.
|
||||
- `avg_over_time()` and `sum_over_time()` act on histogram samples in a way
|
||||
that corresponds to the respective aggregation operators. If a series
|
||||
contains a mix of float samples and histogram samples within the range, the
|
||||
corresponding result is removed entirely from the output vector. Such a
|
||||
removal is flagged by a warn-level annotation.
|
||||
- All other functions ignore histogram samples in the following way: Input
|
||||
ranges containing only histogram samples are silently removed from the
|
||||
output. For ranges with a mix of histogram and float samples, only the float
|
||||
samples are processed and the omission of the histogram samples is flagged by
|
||||
an info-level annotation.
|
||||
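As an illustrative sketch of the histogram behavior described above (assuming `http_request_duration_seconds` is ingested as a native histogram), the following returns, per series, the average of all histogram samples in the last hour, with the histogram components averaged correspondingly:

```
avg_over_time(http_request_duration_seconds[1h])
```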
|
||||
## Trigonometric Functions
|
||||
|
||||
The trigonometric functions work in radians:
|
||||
The trigonometric functions work in radians. They ignore histogram samples in
|
||||
the input vector.
|
||||
|
||||
* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
|
||||
* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
|
||||
* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
|
||||
* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
|
||||
* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
|
||||
* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
|
||||
* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
|
||||
* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
|
||||
* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
|
||||
* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
|
||||
* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
|
||||
* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
|
||||
* `acos(v instant-vector)`: calculates the arccosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Acos)).
|
||||
* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
|
||||
* `asin(v instant-vector)`: calculates the arcsine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Asin)).
|
||||
* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
|
||||
* `atan(v instant-vector)`: calculates the arctangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Atan)).
|
||||
* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
|
||||
* `cos(v instant-vector)`: calculates the cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Cos)).
|
||||
* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
|
||||
* `sin(v instant-vector)`: calculates the sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Sin)).
|
||||
* `sinh(v instant-vector)`: calculates the hyperbolic sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
|
||||
* `tan(v instant-vector)`: calculates the tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Tan)).
|
||||
* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
|
||||
|
||||
The following are useful for converting between degrees and radians:
|
||||
|
||||
* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
|
||||
* `deg(v instant-vector)`: converts radians to degrees for all float samples in `v`.
|
||||
* `pi()`: returns pi.
|
||||
* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
|
||||
* `rad(v instant-vector)`: converts degrees to radians for all float samples in `v`.
|
||||
|
||||
@@ -23,22 +23,51 @@ The following binary arithmetic operators exist in Prometheus:
|
||||
* `^` (power/exponentiation)
|
||||
|
||||
Binary arithmetic operators are defined between scalar/scalar, vector/scalar,
|
||||
and vector/vector value pairs.
|
||||
and vector/vector value pairs. They follow the usual [IEEE 754 floating point
|
||||
arithmetic](https://en.wikipedia.org/wiki/IEEE_754), including the handling of
|
||||
special values like `NaN`, `+Inf`, and `-Inf`.
|
||||
|
||||
**Between two scalars**, the behavior is obvious: they evaluate to another
|
||||
scalar that is the result of the operator applied to both scalar operands.
|
||||
|
||||
**Between an instant vector and a scalar**, the operator is applied to the
|
||||
value of every data sample in the vector. E.g. if a time series instant vector
|
||||
is multiplied by 2, the result is another vector in which every sample value of
|
||||
the original vector is multiplied by 2. The metric name is dropped.
|
||||
value of every data sample in the vector. If the data sample is a float, the
|
||||
operation performed on the data sample is again obvious, e.g. if an instant
|
||||
vector of float samples is multiplied by 2, the result is another vector of
|
||||
float samples in which every sample value of the original vector is multiplied
|
||||
by 2. For vector elements that are histogram samples, the behavior is the
|
||||
following: For `*`, all bucket populations and the count and the sum of
|
||||
observations are multiplied by the scalar. For `/`, the histogram sample has to
|
||||
be on the left hand side (LHS), followed by the scalar on the right hand side
|
||||
(RHS). All bucket populations and the count and the sum of observations are
|
||||
then divided by the scalar. A division by zero results in a histogram with no
|
||||
regular buckets and the zero bucket population and the count and sum of
|
||||
observations all set to +Inf, -Inf, or NaN, depending on their values in the
|
||||
input histogram (positive, negative, or zero/NaN, respectively). For `/` with a
|
||||
scalar on the LHS and a histogram sample on the RHS, and similarly for all
|
||||
other arithmetic binary operators in any combination of a scalar and a
|
||||
histogram sample, there is no result and the corresponding element is removed
|
||||
from the resulting vector. Such a removal is flagged by an info-level
|
||||
annotation.
|
||||
|
||||
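A brief sketch of the scalar case just described, using a hypothetical native-histogram metric (the name is only illustrative):

    http_request_duration_seconds * 2   # all bucket populations, the count, and the sum are doubled
    http_request_duration_seconds / 2   # valid: histogram on the LHS, scalar on the RHS
    2 / http_request_duration_seconds   # no result, flagged by an info-level annotation
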
**Between two instant vectors**, a binary arithmetic operator is applied to
each entry in the left-hand side vector and its [matching element](#vector-matching)
in the right-hand vector. The result is propagated into the result vector with the
grouping labels becoming the output label set. The metric name is dropped. Entries
for which no matching entry in the right-hand vector can be found are not part of
the result.
each entry in the LHS vector and its [matching element](#vector-matching) in
the RHS vector. The result is propagated into the result vector with the
grouping labels becoming the output label set. Entries for which no matching
entry in the right-hand vector can be found are not part of the result. If two
float samples are matched, the behavior is obvious. If a float sample is
matched with a histogram sample, the behavior follows the same logic as between
a scalar and a histogram sample (see above), i.e. `*` and `/` (the latter with
the histogram sample on the LHS) are valid operations, while all others lead to
the removal of the corresponding element from the resulting vector. If two
histogram samples are matched, only `+` and `-` are valid operations, each
adding or subtracting all matching bucket populations and the count and the
sum of observations. All other operations result in the removal of the
corresponding element from the output vector, flagged by an info-level
annotation.

**In any arithmetic binary operation involving vectors**, the metric name is
dropped.

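For instance, given two hypothetical native-histogram series that carry identical label sets, addition combines the histograms bucket by bucket, while other operators drop the pair from the result:

    request_duration_seconds_a + request_duration_seconds_b   # valid: buckets, counts, and sums are added
    request_duration_seconds_a * request_duration_seconds_b   # no result, flagged by an info-level annotation
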
### Trigonometric binary operators

@ -46,9 +75,12 @@ The following trigonometric binary operators, which work in radians, exist in Pr

* `atan2` (based on https://pkg.go.dev/math#Atan2)

Trigonometric operators allow trigonometric functions to be executed on two vectors using
vector matching, which isn't available with normal functions. They act in the same manner
as arithmetic operators.
Trigonometric operators allow trigonometric functions to be executed on two
vectors using vector matching, which isn't available with normal functions.
They act in the same manner as arithmetic operators. They only operate on float
samples. Operations involving histogram samples result in the removal of the
corresponding vector elements from the output vector, flagged by an
info-level annotation.

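As an illustration, `atan2` combines two float vectors that match on their label sets; the metric names here are only placeholders for the y and x components:

    signal_y atan2 signal_x
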
### Comparison binary operators

@ -72,20 +104,28 @@ operators result in another scalar that is either `0` (`false`) or `1`

**Between an instant vector and a scalar**, these operators are applied to the
value of every data sample in the vector, and vector elements between which the
comparison result is `false` get dropped from the result vector. If the `bool`
modifier is provided, vector elements that would be dropped instead have the value
`0` and vector elements that would be kept have the value `1`. The metric name
is dropped if the `bool` modifier is provided.
comparison result is `false` get dropped from the result vector. These
operations only work with float samples in the vector. For histogram samples,
the corresponding element is removed from the result vector, flagged by an
info-level annotation.

**Between two instant vectors**, these operators behave as a filter by default,
applied to matching entries. Vector elements for which the expression is not
true or which do not find a match on the other side of the expression get
dropped from the result, while the others are propagated into a result vector
with the grouping labels becoming the output label set.
If the `bool` modifier is provided, vector elements that would have been
dropped instead have the value `0` and vector elements that would be kept have
the value `1`, with the grouping labels again becoming the output label set.
The metric name is dropped if the `bool` modifier is provided.
with the grouping labels becoming the output label set. Matches between two
float samples work as usual, while matches between a float sample and a
histogram sample are invalid. The corresponding element is removed from the
result vector, flagged by an info-level annotation. Between two histogram
samples, `==` and `!=` work as expected, but all other comparison binary
operations are again invalid.

**In any comparison binary operation involving vectors**, providing the `bool`
modifier changes the behavior in the following way: Vector elements that would
be dropped instead have the value `0` and vector elements that would be kept
have the value `1`. Additionally, the metric name is dropped. (Note that
invalid operations involving histogram samples still return no result rather
than the value `0`.)

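For example, using the `http_requests_total` metric that appears later in this document:

    http_requests_total > 100        # filter: keeps only float samples with a value above 100
    http_requests_total > bool 100   # 0/1 per float sample, metric name dropped
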
### Logical/set binary operators

@ -108,6 +148,9 @@ which do not have matching label sets in `vector1`.
`vector1` for which there are no elements in `vector2` with exactly matching
label sets. All matching elements in both vectors are dropped.

As these logical/set binary operators do not interact with the sample values,
they work in the same way for float samples and histogram samples.

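For instance, sticking with the `vector1`/`vector2` placeholders used above:

    vector1 unless vector2   # series from vector1 with no exactly matching label set in vector2
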
## Vector matching

Operations between vectors attempt to find a matching element in the right-hand side
@ -219,19 +262,20 @@ used to aggregate the elements of a single instant vector, resulting in a new
vector of fewer elements with aggregated values:

* `sum` (calculate sum over dimensions)
* `avg` (calculate the arithmetic average over dimensions)
* `min` (select minimum over dimensions)
* `max` (select maximum over dimensions)
* `avg` (calculate the average over dimensions)
* `bottomk` (smallest _k_ elements by sample value)
* `topk` (largest _k_ elements by sample value)
* `limitk` (sample _k_ elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `limit_ratio` (sample a pseudo-random ratio _r_ of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `group` (all values in the resulting vector are 1)
* `stddev` (calculate population standard deviation over dimensions)
* `stdvar` (calculate population standard variance over dimensions)
* `count` (count number of elements in the vector)
* `count_values` (count number of elements with the same value)
* `bottomk` (smallest k elements by sample value)
* `topk` (largest k elements by sample value)

* `stddev` (calculate population standard deviation over dimensions)
* `stdvar` (calculate population standard variance over dimensions)
* `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
* `limitk` (sample n elements)
* `limit_ratio` (sample elements with approximately 𝑟 ratio if `𝑟 > 0`, and the complement of such samples if `𝑟 = -(1.0 - 𝑟)`)

These operators can either be used to aggregate over **all** label dimensions
or preserve distinct dimensions by including a `without` or `by` clause. These
@ -251,29 +295,67 @@ all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.

`parameter` is only required for `count_values`, `quantile`, `topk`,
`bottomk`, `limitk` and `limit_ratio`.
`parameter` is only required for `topk`, `bottomk`, `limitk`, `limit_ratio`,
`quantile`, and `count_values`. It is used as the value for _k_, _r_, φ, or the
name of the additional label, respectively.

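For instance, with the `http_requests_total` example used below, the parameter comes first:

    topk(5, http_requests_total)
    quantile(0.9, http_requests_total)
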
### Detailed explanations

`sum` sums up sample values in the same way as the `+` binary operator does
between two values. Similarly, `avg` divides the sum by the number of
aggregated samples in the same way as the `/` binary operator. Therefore, all
sample values aggregated into a single resulting vector element must either be
float samples or histogram samples. An aggregation of a mix of both is invalid,
resulting in the removal of the corresponding vector element from the output
vector, flagged by a warn-level annotation.

`min` and `max` only operate on float samples, following IEEE 754 floating
point arithmetic, which in particular implies that `NaN` is only ever
considered a minimum or maximum if all aggregated values are `NaN`. Histogram
samples in the input vector are ignored, flagged by an info-level annotation.

`topk` and `bottomk` are different from other aggregators in that a subset of
the input samples, including the original labels, are returned in the result
vector. `by` and `without` are only used to bucket the input vector. Similar to
`min` and `max`, they only operate on float samples, considering `NaN` values
to be farthest from the top or bottom, respectively. Histogram samples in the
input vector are ignored, flagged by an info-level annotation.

`limitk` and `limit_ratio` also return a subset of the input samples, including
the original labels in the result vector. The subset is selected in a
deterministic pseudo-random way. `limitk` picks _k_ samples, while
`limit_ratio` picks a ratio _r_ of samples (each determined by `parameter`).
This happens independent of the sample type. Therefore, it works for both float
samples and histogram samples. _r_ can be between +1 and -1. The absolute value
of _r_ is used as the selection ratio, but the selection order is inverted for
a negative _r_, which can be used to select complements. For example,
`limit_ratio(0.1, ...)` returns a deterministic set of approximately 10% of
the input samples, while `limit_ratio(-0.9, ...)` returns precisely the
remaining approximately 90% of the input samples not returned by
`limit_ratio(0.1, ...)`.

`group` and `count` do not interact with the sample values;
they work in the same way for float samples and histogram samples.

`count_values` outputs one time series per unique sample value. Each series has
an additional label. The name of that label is given by the aggregation
parameter, and the label value is the unique sample value. The value of each
time series is the number of times that sample value was present.
`count_values` works with both float samples and histogram samples. For the
latter, a compact string representation of the histogram sample value is used
as the label value.

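For example, to count how many series currently report each distinct value of a hypothetical `build_version` metric:

    count_values("version", build_version)
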
`topk` and `bottomk` are different from other aggregators in that a subset of
the input samples, including the original labels, are returned in the result
vector. `by` and `without` are only used to bucket the input vector.

`limitk` and `limit_ratio` also return a subset of the input samples,
including the original labels in the result vector, these are experimental
operators that must be enabled with `--enable-feature=promql-experimental-functions`.
`stddev` and `stdvar` only work with float samples, following IEEE 754 floating
point arithmetic. Histogram samples in the input vector are ignored, flagged by
an info-level annotation.

`quantile` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over. φ is provided as the
aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.
`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned.
For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.


Example:
### Examples

If the metric `http_requests_total` had time series that fan out by
`application`, `instance`, and `group` labels, we could calculate the total
@ -303,28 +385,6 @@ could write:

    limitk(10, http_requests_total)

To deterministically sample approximately 10% of timeseries we could write:

    limit_ratio(0.1, http_requests_total)

Given that `limit_ratio()` implements a deterministic sampling algorithm (based
on labels' hash), you can get the _complement_ of the above samples, i.e.
approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)`
with:

    limit_ratio(-0.9, http_requests_total)

You can also use this feature to e.g. verify that `avg()` is a representative
aggregation for your samples' values, by checking that the difference between
averaging two samples' subsets is "small" when compared to the standard
deviation.

    abs(
      avg(limit_ratio(0.5, http_requests_total))
      -
      avg(limit_ratio(-0.5, http_requests_total))
    ) <= bool stddev(http_requests_total)

## Binary operator precedence

The following list shows the precedence of binary operators in Prometheus, from
@ -340,35 +400,3 @@ highest to lowest.
Operators on the same precedence level are left-associative. For example,
`2 * 3 % 2` is equivalent to `(2 * 3) % 2`. However `^` is right associative,
so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`.

## Operators for native histograms

Native histograms are an experimental feature. Ingesting native histograms has
to be enabled via a [feature flag](../../feature_flags.md#native-histograms). Once
native histograms have been ingested, they can be queried (even after the
feature flag has been disabled again). However, the operator support for native
histograms is still very limited.

Logical/set binary operators work as expected even if histogram samples are
involved. They only check for the existence of a vector element and don't
change their behavior depending on the sample type of an element (float or
histogram). The `count` aggregation operator works similarly.

The binary `+` and `-` operators between two native histograms and the `sum`
and `avg` aggregation operators to aggregate native histograms are fully
supported. Even if the histograms involved have different bucket layouts, the
buckets are automatically converted appropriately so that the operation can be
performed. (With the currently supported bucket schemas, that's always
possible.) If either operator has to aggregate a mix of histogram samples and
float samples, the corresponding vector element is removed from the output
vector entirely.

The binary `*` operator works between a native histogram and a float in any
order, while the binary `/` operator can be used between a native histogram
and a float in that exact order.

All other operators (and unmentioned cases for the above operators) do not
behave in a meaningful way. They either treat the histogram sample as if it
were a float sample of value 0, or (in case of arithmetic operations between a
scalar and a vector) they leave the histogram sample unchanged. This behavior
will change to a meaningful one before native histograms are a stable feature.

@ -8,7 +8,7 @@ require (
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb-client-go/v2 v2.14.0
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.61.0
github.com/prometheus/common v0.62.0
github.com/prometheus/prometheus v1.99.0
github.com/stretchr/testify v1.10.0
)
@ -66,7 +66,7 @@ require (
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.35.2 // indirect
google.golang.org/protobuf v1.36.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.29.3 // indirect

@ -273,8 +273,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
|
||||
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
@ -421,8 +421,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
|
||||
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@ -56,7 +56,7 @@ local row = panel.row;
|
||||
+ variable.query.selectionOptions.withIncludeAll(true, '.+')
|
||||
+ variable.query.selectionOptions.withMulti(true)
|
||||
+ if showMultiCluster then
|
||||
variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{cluster=~"$cluster"}')
|
||||
variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config)
|
||||
else
|
||||
variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(prometheusSelector)s}' % $._config)
|
||||
;
|
||||
@ -70,7 +70,7 @@ local row = panel.row;
|
||||
+ variable.query.selectionOptions.withIncludeAll(true, '.+')
|
||||
+ variable.query.selectionOptions.withMulti(true)
|
||||
+ if showMultiCluster then
|
||||
variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster", job=~"$job"}')
|
||||
variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job"}' % $._config)
|
||||
else
|
||||
variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{job=~"$job"}')
|
||||
;
|
||||
@ -121,14 +121,14 @@ local row = panel.row;
|
||||
panel.table.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})'
|
||||
'count by (cluster, job, instance, version) (prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('table')
|
||||
+ prometheus.withInstant(true)
|
||||
+ prometheus.withLegendFormat(''),
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})'
|
||||
'max by (cluster, job, instance) (time() - process_start_time_seconds{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('table')
|
||||
+ prometheus.withInstant(true)
|
||||
@ -163,10 +163,10 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
|
||||
'sum(rate(prometheus_target_sync_length_seconds_sum{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (%(clusterLabel)s, job, scrape_job, instance) * 1e3' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}:{{scrape_job}}' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -190,10 +190,10 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
|
||||
'sum by (%(clusterLabel)s, job, instance) (prometheus_sd_discovered_targets{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"})' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -216,10 +216,10 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
|
||||
'rate(prometheus_target_interval_length_seconds_sum{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}} {{interval}} configured'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}} {{interval}} configured' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -243,34 +243,34 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))'
|
||||
'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('exceeded body size limit: {{cluster}} {{job}} {{instance}}'),
|
||||
+ prometheus.withLegendFormat('exceeded body size limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config),
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))'
|
||||
'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('exceeded sample limit: {{cluster}} {{job}} {{instance}}'),
|
||||
+ prometheus.withLegendFormat('exceeded sample limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config),
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))'
|
||||
'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('duplicate timestamp: {{cluster}} {{job}} {{instance}}'),
|
||||
+ prometheus.withLegendFormat('duplicate timestamp: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config),
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))'
|
||||
'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('out of bounds: {{cluster}} {{job}} {{instance}}'),
|
||||
+ prometheus.withLegendFormat('out of bounds: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config),
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))'
|
||||
'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('out of order: {{cluster}} {{job}} {{instance}}'),
|
||||
+ prometheus.withLegendFormat('out of order: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -318,10 +318,10 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
|
||||
'rate(prometheus_tsdb_head_samples_appended_total{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -345,10 +345,10 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
|
||||
'prometheus_tsdb_head_series{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head series'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head series' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -372,10 +372,10 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
|
||||
'prometheus_tsdb_head_chunks{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head chunks'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head chunks' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -399,10 +399,10 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
|
||||
'rate(prometheus_engine_query_duration_seconds_count{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config),
|
||||
])
|
||||
else
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
@ -426,7 +426,7 @@ local row = panel.row;
|
||||
panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
|
||||
'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withLegendFormat('{{slice}}'),
|
||||
@ -514,7 +514,7 @@ local row = panel.row;
|
||||
+ variable.query.withDatasourceFromVariable(datasourceVariable)
|
||||
+ variable.query.refresh.onTime()
|
||||
+ variable.query.selectionOptions.withIncludeAll(true)
|
||||
+ variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster"}')
|
||||
+ variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config)
|
||||
;
|
||||
|
||||
local urlVariable =
|
||||
@ -522,7 +522,7 @@ local row = panel.row;
|
||||
+ variable.query.withDatasourceFromVariable(datasourceVariable)
|
||||
+ variable.query.refresh.onTime()
|
||||
+ variable.query.selectionOptions.withIncludeAll(true)
|
||||
+ variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}')
|
||||
+ variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config)
|
||||
;
|
||||
|
||||
local timestampComparison =
|
||||
@ -534,15 +534,15 @@ local row = panel.row;
|
||||
'$datasource',
|
||||
|||
|
||||
(
|
||||
prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}
|
||||
prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"}
|
||||
-
|
||||
ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0)
|
||||
ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} != 0)
|
||||
)
|
||||
|||
|
||||
||| % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}::{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local timestampComparisonRate =
|
||||
@ -554,15 +554,15 @@ local row = panel.row;
|
||||
'$datasource',
|
||||
|||
|
||||
clamp_min(
|
||||
rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
|
||||
rate(prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m])
|
||||
-
|
||||
ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])
|
||||
ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])
|
||||
, 0)
|
||||
|||
|
||||
||| % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local samplesRate =
|
||||
@ -574,16 +574,16 @@ local row = panel.row;
|
||||
'$datasource',
|
||||
|||
|
||||
rate(
|
||||
prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m])
|
||||
prometheus_remote_storage_samples_in_total{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m])
|
||||
-
|
||||
ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
|
||||
ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
|
||||
-
|
||||
(rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
|
||||
|||
|
||||
(rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
|
||||
||| % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local currentShards =
|
||||
@ -593,11 +593,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}'
|
||||
'prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local maxShards =
|
||||
@ -607,11 +607,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}'
|
||||
'prometheus_remote_storage_shards_max{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local minShards =
|
||||
@ -621,11 +621,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}'
|
||||
'prometheus_remote_storage_shards_min{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local desiredShards =
|
||||
@ -635,11 +635,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}'
|
||||
'prometheus_remote_storage_shards_desired{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local shardsCapacity =
|
||||
@ -649,11 +649,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}'
|
||||
'prometheus_remote_storage_shard_capacity{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local pendingSamples =
|
||||
@ -663,11 +663,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}'
|
||||
'prometheus_remote_storage_pending_samples{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local walSegment =
|
||||
@ -679,11 +679,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_tsdb_wal_segment_current{cluster=~"$cluster", instance=~"$instance"}'
|
||||
'prometheus_tsdb_wal_segment_current{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}}' % $._config),
|
||||
]);
|
||||
|
||||
local queueSegment =
|
||||
@ -695,11 +695,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}'
|
||||
'prometheus_wal_watcher_current_segment{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{consumer}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{consumer}}' % $._config),
|
||||
]);
|
||||
|
||||
local droppedSamples =
|
||||
@ -710,11 +710,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])'
|
||||
'rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local failedSamples =
|
||||
@ -725,11 +725,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])'
|
||||
'rate(prometheus_remote_storage_failed_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local retriedSamples =
|
||||
@ -740,11 +740,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])'
|
||||
'rate(prometheus_remote_storage_retried_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
local enqueueRetries =
|
||||
@ -755,11 +755,11 @@ local row = panel.row;
|
||||
+ panel.timeSeries.queryOptions.withTargets([
|
||||
prometheus.new(
|
||||
'$datasource',
|
||||
'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])'
|
||||
'rate(prometheus_remote_storage_enqueue_retries_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config
|
||||
)
|
||||
+ prometheus.withFormat('time_series')
|
||||
+ prometheus.withIntervalFactor(2)
|
||||
+ prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'),
|
||||
+ prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config),
|
||||
]);
|
||||
|
||||
dashboard.new('%(prefix)sRemote Write' % $._config.grafanaPrometheus)
|
||||
|
||||
20
go.mod
20
go.mod
@ -17,8 +17,8 @@ require (
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
|
||||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
github.com/dennwc/varint v1.0.0
|
||||
github.com/digitalocean/godo v1.132.0
|
||||
github.com/docker/docker v27.4.1+incompatible
|
||||
github.com/digitalocean/godo v1.136.0
|
||||
github.com/docker/docker v27.5.1+incompatible
|
||||
github.com/edsrzf/mmap-go v1.2.0
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.3
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1
|
||||
@ -36,19 +36,19 @@ require (
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0
|
||||
github.com/hashicorp/consul/api v1.31.0
|
||||
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.18.0
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.19.0
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.2
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/klauspost/compress v1.17.11
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
|
||||
github.com/linode/linodego v1.46.0
|
||||
github.com/linode/linodego v1.47.0
|
||||
github.com/miekg/dns v1.1.63
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
|
||||
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/oklog/ulid v1.3.1
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.118.0
|
||||
github.com/ovh/go-ovh v1.6.0
|
||||
github.com/prometheus/alertmanager v0.28.0
|
||||
github.com/prometheus/client_golang v1.21.0-rc.0
|
||||
@ -56,7 +56,7 @@ require (
|
||||
github.com/prometheus/common v0.62.0
|
||||
github.com/prometheus/common/assets v0.2.0
|
||||
github.com/prometheus/exporter-toolkit v0.13.2
|
||||
github.com/prometheus/sigv4 v0.1.1
|
||||
github.com/prometheus/sigv4 v0.1.2
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
|
||||
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
|
||||
github.com/stretchr/testify v1.10.0
|
||||
@ -84,7 +84,7 @@ require (
|
||||
golang.org/x/sys v0.29.0
|
||||
golang.org/x/text v0.21.0
|
||||
golang.org/x/tools v0.29.0
|
||||
google.golang.org/api v0.218.0
|
||||
google.golang.org/api v0.219.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f
|
||||
google.golang.org/grpc v1.70.0
|
||||
google.golang.org/protobuf v1.36.4
|
||||
@ -170,8 +170,8 @@ require (
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.118.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
@ -196,7 +196,7 @@ require (
|
||||
golang.org/x/net v0.34.0 // indirect
|
||||
golang.org/x/term v0.28.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
|
||||
48
go.sum
48
go.sum
@ -87,14 +87,14 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
||||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ=
|
||||
github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
|
||||
github.com/digitalocean/godo v1.136.0 h1:DTxugljFJSMBPfEGq4KeXpnKeAHicggNqogcrw/YdZw=
|
||||
github.com/digitalocean/godo v1.136.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
|
||||
github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
|
||||
github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
@ -273,8 +273,8 @@ github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977Vrm
|
||||
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
|
||||
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
|
||||
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOydS7Ju4BERB4Q=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.19.0 h1:crqbWMywudvlPLLczFf2hBpTPIATjrWMmwfiKSTpUt0=
github.com/hetznercloud/hcloud-go/v2 v2.19.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k=
@ -321,8 +321,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU=
github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U=
github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -391,14 +391,14 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.118.0 h1:PbknCwTbeTz8GNSfN4fOIp50YCDO19s1IAp6PGFcdpA=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.118.0/go.mod h1:cOY+YDFtxJH3eQzJDObvWFFSIvD2AstG5MZ9t8wqusQ=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0 h1:DSoYrOjLv23HXpx72hl61br4ZZTj6dqtwZSGoypKWIA=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0/go.mod h1:nR+r7aAbsktscJk4fGmzljblbZBMaiZcIWeKbXV+HmY=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 h1:aUTSkzJExtrlHN32g8hX/cRNEo2ZmucPg+vwPqOYvhg=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0/go.mod h1:a3sewj4nEozMwcNwZTHPzddS+1BnA6BaAkO/CRIGHVU=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.118.0 h1:RZszYLp7sVMOD1rppjY+fP2PQh5qNAh5U6RoQNvd4Rg=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.118.0/go.mod h1:5i928mwS+Ojv41l3/IxcyK1SCy6WnpL3wjLWKDb4YKQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@ -451,8 +451,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/sigv4 v0.1.1 h1:UJxjOqVcXctZlwDjpUpZ2OiMWJdFijgSofwLzO1Xk0Q=
github.com/prometheus/sigv4 v0.1.1/go.mod h1:RAmWVKqx0bwi0Qm4lrKMXFM0nhpesBcenfCtz9qRyH8=
github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8=
github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@ -511,8 +511,8 @@ go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWS
go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU=
go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM=
go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE=
go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w=
go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec=
go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA=
go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec=
go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU=
go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s=
go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY=
@ -670,8 +670,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.218.0 h1:x6JCjEWeZ9PFCRe9z0FBrNwj7pB7DOAqT35N+IPnAUA=
google.golang.org/api v0.218.0/go.mod h1:5VGHBAkxrA/8EFjLVEYmMUJ8/8+gWWQ3s4cFH0FxG2M=
google.golang.org/api v0.219.0 h1:nnKIvxKs/06jWawp2liznTBnMRQBEPpGo7I+oEypTX0=
google.golang.org/api v0.219.0/go.mod h1:K6OmjGm+NtLrIkHxv1U3a0qIf/0JOvAHd5O/6AoyKYE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@ -679,8 +679,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA=
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47 h1:91mG8dNTpkC0uChJUQ9zCiRqx3GEEFOWaRZ0mI6Oj2I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=

@ -20,6 +20,7 @@ import (
    "io"
    "os"
    "path/filepath"
    "strings"
    "testing"

    "github.com/prometheus/prometheus/model/exemplar"
@ -30,32 +31,9 @@ import (
    "github.com/stretchr/testify/require"
)

type newParser func([]byte, *labels.SymbolTable) Parser

var newTestParserFns = map[string]newParser{
    "promtext": NewPromParser,
    "promproto": func(b []byte, st *labels.SymbolTable) Parser {
        return NewProtobufParser(b, true, st)
    },
    "omtext": func(b []byte, st *labels.SymbolTable) Parser {
        return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
    },
    "omtext_with_nhcb": func(b []byte, st *labels.SymbolTable) Parser {
        p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
        return NewNHCBParser(p, st, false)
    },
}

// BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append use it.
// Typically used as follows:
/*
    export bench=v1 && go test ./model/textparse/... \
        -run '^$' -bench '^BenchmarkParse' \
        -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
        | tee ${bench}.txt
*/
// For profiles, add -memprofile=${bench}.mem.pprof -cpuprofile=${bench}.cpu.pprof
// options.
// BenchmarkParse... set of benchmarks analyze efficiency of parsing various
// datasets with different parsers. It mimics how scrape/scrape.go#append use parsers
// and allows comparison with expfmt decoders if applicable.
//
// NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated
// Series, Series+Metrics with and without reuse, Series+CT. Those cases are sometimes
@ -63,123 +41,224 @@ var newTestParserFns = map[string]newParser{
// make sense to persist such cases for everybody (e.g. for CI one day).
// For local iteration, feel free to adjust cases/comment out code etc.
//
// NOTE(bwplotka): Do not try to conclude "what parser (OM, proto, prom) is the fastest"
// as the testdata has different amount and type of metrics and features (e.g. exemplars).
func BenchmarkParse(b *testing.B) {
    for _, bcase := range []struct {
        dataFile  string // Localized to "./testdata".
        dataProto []byte
        parser    string
// NOTE(bwplotka): Those benchmarks are purposefully categorized per data-sets,
// to avoid temptation to assess "what parser (OM, proto, prom) is the fastest,
// in general" here due to not every parser supporting every data set type.
// Use scrape.BenchmarkScrapeLoopAppend if you want one benchmark comparing parsers fairly.

        compareToExpfmtFormat expfmt.FormatType
    }{
        {dataFile: "promtestdata.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain},
        {dataFile: "promtestdata.nometa.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain},
/*
    export bench=v1 && go test ./model/textparse/... \
        -run '^$' -bench '^BenchmarkParsePromText' \
        -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
        | tee ${bench}.txt
*/
func BenchmarkParsePromText(b *testing.B) {
    data := readTestdataFile(b, "alltypes.237mfs.prom.txt")

        // We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430.
        {dataProto: createTestProtoBuf(b).Bytes(), parser: "promproto"},

        // We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703.
        {dataFile: "omtestdata.txt", parser: "omtext"},
        {dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext.

        // NHCB.
        {dataFile: "omhistogramdata.txt", parser: "omtext"},           // Measure OM parser baseline for histograms.
        {dataFile: "omhistogramdata.txt", parser: "omtext_with_nhcb"}, // Measure NHCB over OM parser.
    for _, parser := range []string{
        "promtext",
        "omtext", // Compare how omtext parser deals with Prometheus text format.
        "expfmt-promtext",
    } {
        var buf []byte
        dataCase := bcase.dataFile
        if len(bcase.dataProto) > 0 {
            dataCase = "createTestProtoBuf()"
            buf = bcase.dataProto
        } else {
            f, err := os.Open(filepath.Join("testdata", bcase.dataFile))
            require.NoError(b, err)
            b.Cleanup(func() {
                _ = f.Close()
            })
            buf, err = io.ReadAll(f)
            require.NoError(b, err)
        }
        b.Run(fmt.Sprintf("data=%v/parser=%v", dataCase, bcase.parser), func(b *testing.B) {
            newParserFn := newTestParserFns[bcase.parser]
            var (
                res labels.Labels
                e   exemplar.Exemplar
            )

            b.SetBytes(int64(len(buf)))
            b.ReportAllocs()
            b.ResetTimer()

            st := labels.NewSymbolTable()
            for i := 0; i < b.N; i++ {
                p := newParserFn(buf, st)

            Inner:
                for {
                    t, err := p.Next()
                    switch t {
                    case EntryInvalid:
                        if errors.Is(err, io.EOF) {
                            break Inner
                        }
                        b.Fatal(err)
                    case EntryType:
                        _, _ = p.Type()
                        continue
                    case EntryHelp:
                        _, _ = p.Help()
                        continue
                    case EntryUnit:
                        _, _ = p.Unit()
                        continue
                    case EntryComment:
                        continue
                    case EntryHistogram:
                        _, _, _, _ = p.Histogram()
                    case EntrySeries:
                        _, _, _ = p.Series()
                    default:
                        b.Fatal("not implemented entry", t)
                    }

                    _ = p.Metric(&res)
                    _ = p.CreatedTimestamp()
                    for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
                    }
                }
            }
        })

        b.Run(fmt.Sprintf("data=%v/parser=xpfmt", dataCase), func(b *testing.B) {
            if bcase.compareToExpfmtFormat == expfmt.TypeUnknown {
                b.Skip("compareToExpfmtFormat not set")
            }

            b.SetBytes(int64(len(buf)))
            b.ReportAllocs()
            b.ResetTimer()

            for i := 0; i < b.N; i++ {
                decSamples := make(model.Vector, 0, 50)
                sdec := expfmt.SampleDecoder{
                    Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(bcase.compareToExpfmtFormat)),
                    Opts: &expfmt.DecodeOptions{
                        Timestamp: model.TimeFromUnixNano(0),
                    },
                }

                for {
                    if err := sdec.Decode(&decSamples); err != nil {
                        if errors.Is(err, io.EOF) {
                            break
                        }
                        b.Fatal(err)
                    }
                    decSamples = decSamples[:0]
                }
        b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) {
            if strings.HasPrefix(parser, "expfmt-") {
                benchExpFmt(b, data, parser)
            } else {
                benchParse(b, data, parser)
            }
        })
    }
}

/*
    export bench=v1 && go test ./model/textparse/... \
        -run '^$' -bench '^BenchmarkParsePromText_NoMeta' \
        -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
        | tee ${bench}.txt
*/
func BenchmarkParsePromText_NoMeta(b *testing.B) {
    data := readTestdataFile(b, "alltypes.237mfs.nometa.prom.txt")

    for _, parser := range []string{
        "promtext",
        "expfmt-promtext",
    } {
        b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) {
            if strings.HasPrefix(parser, "expfmt-") {
                benchExpFmt(b, data, parser)
            } else {
                benchParse(b, data, parser)
            }
        })
    }
}

/*
    export bench=v1 && go test ./model/textparse/... \
        -run '^$' -bench '^BenchmarkParseOMText' \
        -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
        | tee ${bench}.txt
*/
func BenchmarkParseOMText(b *testing.B) {
    data := readTestdataFile(b, "alltypes.5mfs.om.txt")
    // TODO(bwplotka): Add comparison with expfmt.TypeOpenMetrics once expfmt
    // support OM exemplars, see https://github.com/prometheus/common/issues/703.
    benchParse(b, data, "omtext")
}

/*
    export bench=v1 && go test ./model/textparse/... \
        -run '^$' -bench '^BenchmarkParsePromProto' \
        -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
        | tee ${bench}.txt
*/
func BenchmarkParsePromProto(b *testing.B) {
    data := createTestProtoBuf(b).Bytes()
    // TODO(bwplotka): Add comparison with expfmt.TypeProtoDelim once expfmt
    // support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430.
    benchParse(b, data, "promproto")
}

/*
    export bench=v1 && go test ./model/textparse/... \
        -run '^$' -bench '^BenchmarkParseOpenMetricsNHCB' \
        -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
        | tee ${bench}.txt
*/
func BenchmarkParseOpenMetricsNHCB(b *testing.B) {
    data := readTestdataFile(b, "1histogram.om.txt")

    for _, parser := range []string{
        "omtext",           // Measure OM parser baseline for histograms.
        "omtext_with_nhcb", // Measure NHCB over OM parser.
    } {
        b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) {
            benchParse(b, data, parser)
        })
    }
}

func benchParse(b *testing.B, data []byte, parser string) {
    type newParser func([]byte, *labels.SymbolTable) Parser

    var newParserFn newParser
    switch parser {
    case "promtext":
        newParserFn = NewPromParser
    case "promproto":
        newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
            return NewProtobufParser(b, true, st)
        }
    case "omtext":
        newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
            return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
        }
    case "omtext_with_nhcb":
        newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
            p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
            return NewNHCBParser(p, st, false)
        }
    default:
        b.Fatal("unknown parser", parser)
    }

    var (
        res labels.Labels
        e   exemplar.Exemplar
    )

    b.SetBytes(int64(len(data)))
    b.ReportAllocs()
    b.ResetTimer()

    st := labels.NewSymbolTable()
    for i := 0; i < b.N; i++ {
        p := newParserFn(data, st)

    Inner:
        for {
            t, err := p.Next()
            switch t {
            case EntryInvalid:
                if errors.Is(err, io.EOF) {
                    break Inner
                }
                b.Fatal(err)
            case EntryType:
                _, _ = p.Type()
                continue
            case EntryHelp:
                _, _ = p.Help()
                continue
            case EntryUnit:
                _, _ = p.Unit()
                continue
            case EntryComment:
                continue
            case EntryHistogram:
                _, _, _, _ = p.Histogram()
            case EntrySeries:
                _, _, _ = p.Series()
            default:
                b.Fatal("not implemented entry", t)
            }

            _ = p.Metric(&res)
            _ = p.CreatedTimestamp()
            for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
            }
        }
    }
}

func benchExpFmt(b *testing.B, data []byte, expFormatTypeStr string) {
    expfmtFormatType := expfmt.TypeUnknown
    switch expFormatTypeStr {
    case "expfmt-promtext":
        expfmtFormatType = expfmt.TypeProtoText
    case "expfmt-promproto":
        expfmtFormatType = expfmt.TypeProtoDelim
    case "expfmt-omtext":
        expfmtFormatType = expfmt.TypeOpenMetrics
    default:
        b.Fatal("unknown expfmt format type", expFormatTypeStr)
    }

    b.SetBytes(int64(len(data)))
    b.ReportAllocs()
    b.ResetTimer()

    for i := 0; i < b.N; i++ {
        decSamples := make(model.Vector, 0, 50)
        sdec := expfmt.SampleDecoder{
            Dec: expfmt.NewDecoder(bytes.NewReader(data), expfmt.NewFormat(expfmtFormatType)),
            Opts: &expfmt.DecodeOptions{
                Timestamp: model.TimeFromUnixNano(0),
            },
        }

        for {
            if err := sdec.Decode(&decSamples); err != nil {
                if errors.Is(err, io.EOF) {
                    break
                }
                b.Fatal(err)
            }
            decSamples = decSamples[:0]
        }
    }
}

func readTestdataFile(tb testing.TB, file string) []byte {
    tb.Helper()

    f, err := os.Open(filepath.Join("testdata", file))
    require.NoError(tb, err)

    tb.Cleanup(func() {
        _ = f.Close()
    })
    buf, err := io.ReadAll(f)
    require.NoError(tb, err)
    return buf
}

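The capture convention in the comments above (tee-ing each run into ${bench}.txt and optionally writing ${bench}.cpu.pprof / ${bench}.mem.pprof profiles) is only half of the workflow; the captures still have to be compared. A minimal sketch of the usual follow-up, assuming benchstat from golang.org/x/perf and the standard go tool pprof, neither of which is prescribed by this change:

    # first capture, e.g. on the baseline branch
    export bench=v1 && go test ./model/textparse/... \
        -run '^$' -bench '^BenchmarkParsePromText' \
        -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
        | tee ${bench}.txt
    # ...apply the change, repeat with bench=v2, then compare...
    go install golang.org/x/perf/cmd/benchstat@latest
    benchstat v1.txt v2.txt
    # inspect a CPU profile captured via -cpuprofile=${bench}.cpu.pprof
    go tool pprof -http=:8080 v1.cpu.pprof

The -count 6 in the run commands is what makes such a comparison useful: benchstat needs several samples per benchmark to report a statistically meaningful delta rather than run-to-run noise.
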
1858 model/textparse/testdata/alltypes.237mfs.nometa.prom.txt vendored Normal file
File diff suppressed because it is too large
2332 model/textparse/testdata/alltypes.237mfs.prom.txt vendored Normal file
File diff suppressed because it is too large
411 model/textparse/testdata/promtestdata.nometa.txt vendored
@ -1,411 +0,0 @@
|
||||
go_gc_duration_seconds{quantile="0"} 4.9351e-05
|
||||
go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
|
||||
go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
|
||||
go_gc_duration_seconds{quantile="0.75"} 0.000106744
|
||||
go_gc_duration_seconds{quantile="1"} 0.002072195
|
||||
go_gc_duration_seconds_sum 0.012139815
|
||||
go_gc_duration_seconds_count 99
|
||||
go_goroutines 33
|
||||
go_memstats_alloc_bytes 1.7518624e+07
|
||||
go_memstats_alloc_bytes_total 8.3062296e+08
|
||||
go_memstats_buck_hash_sys_bytes 1.494637e+06
|
||||
go_memstats_frees_total 4.65658e+06
|
||||
go_memstats_gc_sys_bytes 1.107968e+06
|
||||
go_memstats_heap_alloc_bytes 1.7518624e+07
|
||||
go_memstats_heap_idle_bytes 6.668288e+06
|
||||
go_memstats_heap_inuse_bytes 1.8956288e+07
|
||||
go_memstats_heap_objects 72755
|
||||
go_memstats_heap_released_bytes_total 0
|
||||
go_memstats_heap_sys_bytes 2.5624576e+07
|
||||
go_memstats_last_gc_time_seconds 1.4843955586166437e+09
|
||||
go_memstats_lookups_total 2089
|
||||
go_memstats_mallocs_total 4.729335e+06
|
||||
go_memstats_mcache_inuse_bytes 9600
|
||||
go_memstats_mcache_sys_bytes 16384
|
||||
go_memstats_mspan_inuse_bytes 211520
|
||||
go_memstats_mspan_sys_bytes 245760
|
||||
go_memstats_next_gc_bytes 2.033527e+07
|
||||
go_memstats_other_sys_bytes 2.077323e+06
|
||||
go_memstats_stack_inuse_bytes 1.6384e+06
|
||||
go_memstats_stack_sys_bytes 1.6384e+06
|
||||
go_memstats_sys_bytes 3.2205048e+07
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="alerts"} 0
|
||||
http_request_duration_microseconds_count{handler="alerts"} 0
|
||||
http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="config"} 0
|
||||
http_request_duration_microseconds_count{handler="config"} 0
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="consoles"} 0
|
||||
http_request_duration_microseconds_count{handler="consoles"} 0
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="drop_series"} 0
|
||||
http_request_duration_microseconds_count{handler="drop_series"} 0
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="federate"} 0
|
||||
http_request_duration_microseconds_count{handler="federate"} 0
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="flags"} 0
|
||||
http_request_duration_microseconds_count{handler="flags"} 0
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
|
||||
http_request_duration_microseconds_sum{handler="graph"} 5803.93
|
||||
http_request_duration_microseconds_count{handler="graph"} 3
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="heap"} 0
|
||||
http_request_duration_microseconds_count{handler="heap"} 0
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
|
||||
http_request_duration_microseconds_sum{handler="label_values"} 3995.574
|
||||
http_request_duration_microseconds_count{handler="label_values"} 3
|
||||
http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="options"} 0
|
||||
http_request_duration_microseconds_count{handler="options"} 0
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
|
||||
http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
|
||||
http_request_duration_microseconds_count{handler="prometheus"} 462
|
||||
http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
|
||||
http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
|
||||
http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
|
||||
http_request_duration_microseconds_sum{handler="query"} 26074.11
|
||||
http_request_duration_microseconds_count{handler="query"} 6
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="query_range"} 0
|
||||
http_request_duration_microseconds_count{handler="query_range"} 0
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="rules"} 0
|
||||
http_request_duration_microseconds_count{handler="rules"} 0
|
||||
http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="series"} 0
|
||||
http_request_duration_microseconds_count{handler="series"} 0
|
||||
http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
|
||||
http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
|
||||
http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
|
||||
http_request_duration_microseconds_sum{handler="static"} 6458.621
|
||||
http_request_duration_microseconds_count{handler="static"} 3
|
||||
http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="status"} 0
|
||||
http_request_duration_microseconds_count{handler="status"} 0
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="targets"} 0
|
||||
http_request_duration_microseconds_count{handler="targets"} 0
|
||||
http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="version"} 0
|
||||
http_request_duration_microseconds_count{handler="version"} 0
|
||||
http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="alerts"} 0
|
||||
http_request_size_bytes_count{handler="alerts"} 0
|
||||
http_request_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="config"} 0
|
||||
http_request_size_bytes_count{handler="config"} 0
|
||||
http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="consoles"} 0
|
||||
http_request_size_bytes_count{handler="consoles"} 0
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="drop_series"} 0
|
||||
http_request_size_bytes_count{handler="drop_series"} 0
|
||||
http_request_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="federate"} 0
|
||||
http_request_size_bytes_count{handler="federate"} 0
|
||||
http_request_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="flags"} 0
|
||||
http_request_size_bytes_count{handler="flags"} 0
|
||||
http_request_size_bytes{handler="graph",quantile="0.5"} 367
|
||||
http_request_size_bytes{handler="graph",quantile="0.9"} 389
|
||||
http_request_size_bytes{handler="graph",quantile="0.99"} 389
|
||||
http_request_size_bytes_sum{handler="graph"} 1145
|
||||
http_request_size_bytes_count{handler="graph"} 3
|
||||
http_request_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="heap"} 0
|
||||
http_request_size_bytes_count{handler="heap"} 0
|
||||
http_request_size_bytes{handler="label_values",quantile="0.5"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.9"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.99"} 416
|
||||
http_request_size_bytes_sum{handler="label_values"} 1248
|
||||
http_request_size_bytes_count{handler="label_values"} 3
|
||||
http_request_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="options"} 0
|
||||
http_request_size_bytes_count{handler="options"} 0
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
|
||||
http_request_size_bytes_sum{handler="prometheus"} 109956
|
||||
http_request_size_bytes_count{handler="prometheus"} 462
|
||||
http_request_size_bytes{handler="query",quantile="0.5"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.9"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.99"} 531
|
||||
http_request_size_bytes_sum{handler="query"} 3186
|
||||
http_request_size_bytes_count{handler="query"} 6
|
||||
http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="query_range"} 0
|
||||
http_request_size_bytes_count{handler="query_range"} 0
|
||||
http_request_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="rules"} 0
|
||||
http_request_size_bytes_count{handler="rules"} 0
|
||||
http_request_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="series"} 0
|
||||
http_request_size_bytes_count{handler="series"} 0
|
||||
http_request_size_bytes{handler="static",quantile="0.5"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.9"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.99"} 379
|
||||
http_request_size_bytes_sum{handler="static"} 1137
|
||||
http_request_size_bytes_count{handler="static"} 3
|
||||
http_request_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="status"} 0
|
||||
http_request_size_bytes_count{handler="status"} 0
|
||||
http_request_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="targets"} 0
|
||||
http_request_size_bytes_count{handler="targets"} 0
|
||||
http_request_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="version"} 0
|
||||
http_request_size_bytes_count{handler="version"} 0
|
||||
http_requests_total{code="200",handler="graph",method="get"} 3
|
||||
http_requests_total{code="200",handler="label_values",method="get"} 3
|
||||
http_requests_total{code="200",handler="prometheus",method="get"} 462
|
||||
http_requests_total{code="200",handler="query",method="get"} 6
|
||||
http_requests_total{code="200",handler="static",method="get"} 3
|
||||
http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="alerts"} 0
|
||||
http_response_size_bytes_count{handler="alerts"} 0
|
||||
http_response_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="config"} 0
|
||||
http_response_size_bytes_count{handler="config"} 0
|
||||
http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="consoles"} 0
|
||||
http_response_size_bytes_count{handler="consoles"} 0
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="drop_series"} 0
|
||||
http_response_size_bytes_count{handler="drop_series"} 0
|
||||
http_response_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="federate"} 0
|
||||
http_response_size_bytes_count{handler="federate"} 0
|
||||
http_response_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="flags"} 0
|
||||
http_response_size_bytes_count{handler="flags"} 0
|
||||
http_response_size_bytes{handler="graph",quantile="0.5"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.9"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.99"} 3619
|
||||
http_response_size_bytes_sum{handler="graph"} 10857
|
||||
http_response_size_bytes_count{handler="graph"} 3
|
||||
http_response_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="heap"} 0
|
||||
http_response_size_bytes_count{handler="heap"} 0
|
||||
http_response_size_bytes{handler="label_values",quantile="0.5"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.9"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.99"} 642
|
||||
http_response_size_bytes_sum{handler="label_values"} 1926
|
||||
http_response_size_bytes_count{handler="label_values"} 3
|
||||
http_response_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="options"} 0
|
||||
http_response_size_bytes_count{handler="options"} 0
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
|
||||
http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
|
||||
http_response_size_bytes_count{handler="prometheus"} 462
|
||||
http_response_size_bytes{handler="query",quantile="0.5"} 776
|
||||
http_response_size_bytes{handler="query",quantile="0.9"} 781
|
||||
http_response_size_bytes{handler="query",quantile="0.99"} 781
|
||||
http_response_size_bytes_sum{handler="query"} 4656
|
||||
http_response_size_bytes_count{handler="query"} 6
|
||||
http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="query_range"} 0
|
||||
http_response_size_bytes_count{handler="query_range"} 0
|
||||
http_response_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="rules"} 0
|
||||
http_response_size_bytes_count{handler="rules"} 0
|
||||
http_response_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="series"} 0
|
||||
http_response_size_bytes_count{handler="series"} 0
|
||||
http_response_size_bytes{handler="static",quantile="0.5"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.9"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.99"} 6316
|
||||
http_response_size_bytes_sum{handler="static"} 18948
|
||||
http_response_size_bytes_count{handler="static"} 3
|
||||
http_response_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="status"} 0
|
||||
http_response_size_bytes_count{handler="status"} 0
|
||||
http_response_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="targets"} 0
|
||||
http_response_size_bytes_count{handler="targets"} 0
|
||||
http_response_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="version"} 0
|
||||
http_response_size_bytes_count{handler="version"} 0
|
||||
prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
|
||||
prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
|
||||
prometheus_config_last_reload_successful 1
|
||||
prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_count 1
|
||||
prometheus_evaluator_iterations_skipped_total 0
|
||||
prometheus_notifications_dropped_total 0
|
||||
prometheus_notifications_queue_capacity 10000
|
||||
prometheus_notifications_queue_length 0
|
||||
prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
|
||||
prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_azure_refresh_duration_seconds_count 0
|
||||
prometheus_sd_azure_refresh_failures_total 0
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_failures_total 0
|
||||
prometheus_sd_dns_lookup_failures_total 0
|
||||
prometheus_sd_dns_lookups_total 0
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_ec2_refresh_duration_seconds_count 0
|
||||
prometheus_sd_ec2_refresh_failures_total 0
|
||||
prometheus_sd_file_read_errors_total 0
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds_sum 0
|
||||
prometheus_sd_file_scan_duration_seconds_count 0
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
|
||||
prometheus_sd_gce_refresh_duration_sum 0
|
||||
prometheus_sd_gce_refresh_duration_count 0
|
||||
prometheus_sd_gce_refresh_failures_total 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_marathon_refresh_duration_seconds_count 0
|
||||
prometheus_sd_marathon_refresh_failures_total 0
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
|
||||
prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
|
||||
prometheus_target_interval_length_seconds_count{interval="50ms"} 685
|
||||
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
|
||||
prometheus_target_skipped_scrapes_total 0
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
|
||||
prometheus_treecache_watcher_goroutines 0
|
||||
prometheus_treecache_zookeeper_failures_total 0
|
||||
# EOF
|
||||
529 model/textparse/testdata/promtestdata.txt vendored
@ -1,529 +0,0 @@
|
||||
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
|
||||
# TYPE go_gc_duration_seconds summary
|
||||
go_gc_duration_seconds{quantile="0"} 4.9351e-05
|
||||
go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
|
||||
go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
|
||||
go_gc_duration_seconds{quantile="0.75"} 0.000106744
|
||||
go_gc_duration_seconds{quantile="1"} 0.002072195
|
||||
go_gc_duration_seconds_sum 0.012139815
|
||||
go_gc_duration_seconds_count 99
|
||||
# HELP go_goroutines Number of goroutines that currently exist.
|
||||
# TYPE go_goroutines gauge
|
||||
go_goroutines 33
|
||||
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
|
||||
# TYPE go_memstats_alloc_bytes gauge
|
||||
go_memstats_alloc_bytes 1.7518624e+07
|
||||
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
|
||||
# TYPE go_memstats_alloc_bytes_total counter
|
||||
go_memstats_alloc_bytes_total 8.3062296e+08
|
||||
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
|
||||
# TYPE go_memstats_buck_hash_sys_bytes gauge
|
||||
go_memstats_buck_hash_sys_bytes 1.494637e+06
|
||||
# HELP go_memstats_frees_total Total number of frees.
|
||||
# TYPE go_memstats_frees_total counter
|
||||
go_memstats_frees_total 4.65658e+06
|
||||
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
|
||||
# TYPE go_memstats_gc_sys_bytes gauge
|
||||
go_memstats_gc_sys_bytes 1.107968e+06
|
||||
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
|
||||
# TYPE go_memstats_heap_alloc_bytes gauge
|
||||
go_memstats_heap_alloc_bytes 1.7518624e+07
|
||||
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
|
||||
# TYPE go_memstats_heap_idle_bytes gauge
|
||||
go_memstats_heap_idle_bytes 6.668288e+06
|
||||
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
|
||||
# TYPE go_memstats_heap_inuse_bytes gauge
|
||||
go_memstats_heap_inuse_bytes 1.8956288e+07
|
||||
# HELP go_memstats_heap_objects Number of allocated objects.
|
||||
# TYPE go_memstats_heap_objects gauge
|
||||
go_memstats_heap_objects 72755
|
||||
# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
|
||||
# TYPE go_memstats_heap_released_bytes_total counter
|
||||
go_memstats_heap_released_bytes_total 0
|
||||
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
|
||||
# TYPE go_memstats_heap_sys_bytes gauge
|
||||
go_memstats_heap_sys_bytes 2.5624576e+07
|
||||
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
|
||||
# TYPE go_memstats_last_gc_time_seconds gauge
|
||||
go_memstats_last_gc_time_seconds 1.4843955586166437e+09
|
||||
# HELP go_memstats_lookups_total Total number of pointer lookups.
|
||||
# TYPE go_memstats_lookups_total counter
|
||||
go_memstats_lookups_total 2089
|
||||
# HELP go_memstats_mallocs_total Total number of mallocs.
|
||||
# TYPE go_memstats_mallocs_total counter
|
||||
go_memstats_mallocs_total 4.729335e+06
|
||||
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
|
||||
# TYPE go_memstats_mcache_inuse_bytes gauge
|
||||
go_memstats_mcache_inuse_bytes 9600
|
||||
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
|
||||
# TYPE go_memstats_mcache_sys_bytes gauge
|
||||
go_memstats_mcache_sys_bytes 16384
|
||||
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
|
||||
# TYPE go_memstats_mspan_inuse_bytes gauge
|
||||
go_memstats_mspan_inuse_bytes 211520
|
||||
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
|
||||
# TYPE go_memstats_mspan_sys_bytes gauge
|
||||
go_memstats_mspan_sys_bytes 245760
|
||||
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
|
||||
# TYPE go_memstats_next_gc_bytes gauge
|
||||
go_memstats_next_gc_bytes 2.033527e+07
|
||||
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
|
||||
# TYPE go_memstats_other_sys_bytes gauge
|
||||
go_memstats_other_sys_bytes 2.077323e+06
|
||||
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
|
||||
# TYPE go_memstats_stack_inuse_bytes gauge
|
||||
go_memstats_stack_inuse_bytes 1.6384e+06
|
||||
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
|
||||
# TYPE go_memstats_stack_sys_bytes gauge
|
||||
go_memstats_stack_sys_bytes 1.6384e+06
|
||||
# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
|
||||
# TYPE go_memstats_sys_bytes gauge
|
||||
go_memstats_sys_bytes 3.2205048e+07
|
||||
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
|
||||
# TYPE http_request_duration_microseconds summary
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="alerts"} 0
|
||||
http_request_duration_microseconds_count{handler="alerts"} 0
|
||||
http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="config"} 0
|
||||
http_request_duration_microseconds_count{handler="config"} 0
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="consoles"} 0
|
||||
http_request_duration_microseconds_count{handler="consoles"} 0
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="drop_series"} 0
|
||||
http_request_duration_microseconds_count{handler="drop_series"} 0
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="federate"} 0
|
||||
http_request_duration_microseconds_count{handler="federate"} 0
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="flags"} 0
|
||||
http_request_duration_microseconds_count{handler="flags"} 0
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
|
||||
http_request_duration_microseconds_sum{handler="graph"} 5803.93
|
||||
http_request_duration_microseconds_count{handler="graph"} 3
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="heap"} 0
|
||||
http_request_duration_microseconds_count{handler="heap"} 0
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
|
||||
http_request_duration_microseconds_sum{handler="label_values"} 3995.574
|
||||
http_request_duration_microseconds_count{handler="label_values"} 3
|
||||
http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="options"} 0
|
||||
http_request_duration_microseconds_count{handler="options"} 0
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
|
||||
http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
|
||||
http_request_duration_microseconds_count{handler="prometheus"} 462
|
||||
http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
|
||||
http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
|
||||
http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
|
||||
http_request_duration_microseconds_sum{handler="query"} 26074.11
|
||||
http_request_duration_microseconds_count{handler="query"} 6
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="query_range"} 0
|
||||
http_request_duration_microseconds_count{handler="query_range"} 0
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="rules"} 0
|
||||
http_request_duration_microseconds_count{handler="rules"} 0
|
||||
http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="series"} 0
|
||||
http_request_duration_microseconds_count{handler="series"} 0
|
||||
http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
|
||||
http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
|
||||
http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
|
||||
http_request_duration_microseconds_sum{handler="static"} 6458.621
|
||||
http_request_duration_microseconds_count{handler="static"} 3
|
||||
http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="status"} 0
|
||||
http_request_duration_microseconds_count{handler="status"} 0
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="targets"} 0
|
||||
http_request_duration_microseconds_count{handler="targets"} 0
|
||||
http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="version"} 0
|
||||
http_request_duration_microseconds_count{handler="version"} 0
|
||||
# HELP http_request_size_bytes The HTTP request sizes in bytes.
|
||||
# TYPE http_request_size_bytes summary
|
||||
http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="alerts"} 0
|
||||
http_request_size_bytes_count{handler="alerts"} 0
|
||||
http_request_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="config"} 0
|
||||
http_request_size_bytes_count{handler="config"} 0
|
||||
http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="consoles"} 0
|
||||
http_request_size_bytes_count{handler="consoles"} 0
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="drop_series"} 0
|
||||
http_request_size_bytes_count{handler="drop_series"} 0
|
||||
http_request_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="federate"} 0
|
||||
http_request_size_bytes_count{handler="federate"} 0
|
||||
http_request_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="flags"} 0
|
||||
http_request_size_bytes_count{handler="flags"} 0
|
||||
http_request_size_bytes{handler="graph",quantile="0.5"} 367
|
||||
http_request_size_bytes{handler="graph",quantile="0.9"} 389
|
||||
http_request_size_bytes{handler="graph",quantile="0.99"} 389
|
||||
http_request_size_bytes_sum{handler="graph"} 1145
|
||||
http_request_size_bytes_count{handler="graph"} 3
|
||||
http_request_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="heap"} 0
|
||||
http_request_size_bytes_count{handler="heap"} 0
|
||||
http_request_size_bytes{handler="label_values",quantile="0.5"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.9"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.99"} 416
|
||||
http_request_size_bytes_sum{handler="label_values"} 1248
|
||||
http_request_size_bytes_count{handler="label_values"} 3
|
||||
http_request_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="options"} 0
|
||||
http_request_size_bytes_count{handler="options"} 0
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
|
||||
http_request_size_bytes_sum{handler="prometheus"} 109956
|
||||
http_request_size_bytes_count{handler="prometheus"} 462
|
||||
http_request_size_bytes{handler="query",quantile="0.5"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.9"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.99"} 531
|
||||
http_request_size_bytes_sum{handler="query"} 3186
|
||||
http_request_size_bytes_count{handler="query"} 6
|
||||
http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="query_range"} 0
|
||||
http_request_size_bytes_count{handler="query_range"} 0
|
||||
http_request_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="rules"} 0
|
||||
http_request_size_bytes_count{handler="rules"} 0
|
||||
http_request_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="series"} 0
|
||||
http_request_size_bytes_count{handler="series"} 0
|
||||
http_request_size_bytes{handler="static",quantile="0.5"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.9"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.99"} 379
|
||||
http_request_size_bytes_sum{handler="static"} 1137
|
||||
http_request_size_bytes_count{handler="static"} 3
|
||||
http_request_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="status"} 0
|
||||
http_request_size_bytes_count{handler="status"} 0
|
||||
http_request_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="targets"} 0
|
||||
http_request_size_bytes_count{handler="targets"} 0
|
||||
http_request_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="version"} 0
|
||||
http_request_size_bytes_count{handler="version"} 0
|
||||
# HELP http_requests_total Total number of HTTP requests made.
|
||||
# TYPE http_requests_total counter
|
||||
http_requests_total{code="200",handler="graph",method="get"} 3
|
||||
http_requests_total{code="200",handler="label_values",method="get"} 3
|
||||
http_requests_total{code="200",handler="prometheus",method="get"} 462
|
||||
http_requests_total{code="200",handler="query",method="get"} 6
|
||||
http_requests_total{code="200",handler="static",method="get"} 3
|
||||
# HELP http_response_size_bytes The HTTP response sizes in bytes.
|
||||
# TYPE http_response_size_bytes summary
|
||||
http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="alerts"} 0
|
||||
http_response_size_bytes_count{handler="alerts"} 0
|
||||
http_response_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="config"} 0
|
||||
http_response_size_bytes_count{handler="config"} 0
|
||||
http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="consoles"} 0
|
||||
http_response_size_bytes_count{handler="consoles"} 0
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="drop_series"} 0
|
||||
http_response_size_bytes_count{handler="drop_series"} 0
|
||||
http_response_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="federate"} 0
|
||||
http_response_size_bytes_count{handler="federate"} 0
|
||||
http_response_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="flags"} 0
|
||||
http_response_size_bytes_count{handler="flags"} 0
|
||||
http_response_size_bytes{handler="graph",quantile="0.5"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.9"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.99"} 3619
|
||||
http_response_size_bytes_sum{handler="graph"} 10857
|
||||
http_response_size_bytes_count{handler="graph"} 3
|
||||
http_response_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="heap"} 0
|
||||
http_response_size_bytes_count{handler="heap"} 0
|
||||
http_response_size_bytes{handler="label_values",quantile="0.5"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.9"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.99"} 642
|
||||
http_response_size_bytes_sum{handler="label_values"} 1926
|
||||
http_response_size_bytes_count{handler="label_values"} 3
|
||||
http_response_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="options"} 0
|
||||
http_response_size_bytes_count{handler="options"} 0
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
|
||||
http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
|
||||
http_response_size_bytes_count{handler="prometheus"} 462
|
||||
http_response_size_bytes{handler="query",quantile="0.5"} 776
|
||||
http_response_size_bytes{handler="query",quantile="0.9"} 781
|
||||
http_response_size_bytes{handler="query",quantile="0.99"} 781
|
||||
http_response_size_bytes_sum{handler="query"} 4656
|
||||
http_response_size_bytes_count{handler="query"} 6
|
||||
http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="query_range"} 0
|
||||
http_response_size_bytes_count{handler="query_range"} 0
|
||||
http_response_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="rules"} 0
|
||||
http_response_size_bytes_count{handler="rules"} 0
|
||||
http_response_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="series"} 0
|
||||
http_response_size_bytes_count{handler="series"} 0
|
||||
http_response_size_bytes{handler="static",quantile="0.5"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.9"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.99"} 6316
|
||||
http_response_size_bytes_sum{handler="static"} 18948
|
||||
http_response_size_bytes_count{handler="static"} 3
|
||||
http_response_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="status"} 0
|
||||
http_response_size_bytes_count{handler="status"} 0
|
||||
http_response_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="targets"} 0
|
||||
http_response_size_bytes_count{handler="targets"} 0
|
||||
http_response_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="version"} 0
|
||||
http_response_size_bytes_count{handler="version"} 0
|
||||
# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
|
||||
# TYPE prometheus_build_info gauge
|
||||
prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
|
||||
# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
|
||||
# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
|
||||
prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
|
||||
# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
|
||||
# TYPE prometheus_config_last_reload_successful gauge
|
||||
prometheus_config_last_reload_successful 1
|
||||
# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations.
|
||||
# TYPE prometheus_evaluator_duration_seconds summary
|
||||
prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_count 1
|
||||
# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage.
|
||||
# TYPE prometheus_evaluator_iterations_skipped_total counter
|
||||
prometheus_evaluator_iterations_skipped_total 0
|
||||
# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration.
|
||||
# TYPE prometheus_notifications_dropped_total counter
|
||||
prometheus_notifications_dropped_total 0
|
||||
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
|
||||
# TYPE prometheus_notifications_queue_capacity gauge
|
||||
prometheus_notifications_queue_capacity 10000
|
||||
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
|
||||
# TYPE prometheus_notifications_queue_length gauge
|
||||
prometheus_notifications_queue_length 0
|
||||
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
|
||||
# TYPE prometheus_rule_evaluation_failures_total counter
|
||||
prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
|
||||
prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
|
||||
# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_azure_refresh_duration_seconds summary
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_azure_refresh_duration_seconds_count 0
|
||||
# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures.
|
||||
# TYPE prometheus_sd_azure_refresh_failures_total counter
|
||||
prometheus_sd_azure_refresh_failures_total 0
|
||||
# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
|
||||
# TYPE prometheus_sd_consul_rpc_duration_seconds summary
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
|
||||
# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
|
||||
# TYPE prometheus_sd_consul_rpc_failures_total counter
|
||||
prometheus_sd_consul_rpc_failures_total 0
|
||||
# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
|
||||
# TYPE prometheus_sd_dns_lookup_failures_total counter
|
||||
prometheus_sd_dns_lookup_failures_total 0
|
||||
# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
|
||||
# TYPE prometheus_sd_dns_lookups_total counter
|
||||
prometheus_sd_dns_lookups_total 0
|
||||
# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_ec2_refresh_duration_seconds summary
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_ec2_refresh_duration_seconds_count 0
|
||||
# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures.
|
||||
# TYPE prometheus_sd_ec2_refresh_failures_total counter
|
||||
prometheus_sd_ec2_refresh_failures_total 0
|
||||
# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
|
||||
# TYPE prometheus_sd_file_read_errors_total counter
|
||||
prometheus_sd_file_read_errors_total 0
|
||||
# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
|
||||
# TYPE prometheus_sd_file_scan_duration_seconds summary
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds_sum 0
|
||||
prometheus_sd_file_scan_duration_seconds_count 0
|
||||
# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_gce_refresh_duration summary
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
|
||||
prometheus_sd_gce_refresh_duration_sum 0
|
||||
prometheus_sd_gce_refresh_duration_count 0
|
||||
# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures.
|
||||
# TYPE prometheus_sd_gce_refresh_failures_total counter
|
||||
prometheus_sd_gce_refresh_failures_total 0
|
||||
# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
|
||||
# TYPE prometheus_sd_kubernetes_events_total counter
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
|
||||
# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_marathon_refresh_duration_seconds summary
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_marathon_refresh_duration_seconds_count 0
|
||||
# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures.
|
||||
# TYPE prometheus_sd_marathon_refresh_failures_total counter
|
||||
prometheus_sd_marathon_refresh_failures_total 0
|
||||
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
|
||||
# TYPE prometheus_target_interval_length_seconds summary
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
|
||||
prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
|
||||
prometheus_target_interval_length_seconds_count{interval="50ms"} 685
|
||||
# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
|
||||
# TYPE prometheus_target_scrape_pool_sync_total counter
|
||||
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
|
||||
# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled.
|
||||
# TYPE prometheus_target_skipped_scrapes_total counter
|
||||
prometheus_target_skipped_scrapes_total 0
|
||||
# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
|
||||
# TYPE prometheus_target_sync_length_seconds summary
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
|
||||
# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
|
||||
# TYPE prometheus_treecache_watcher_goroutines gauge
|
||||
prometheus_treecache_watcher_goroutines 0
|
||||
# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
|
||||
# TYPE prometheus_treecache_zookeeper_failures_total counter
|
||||
prometheus_treecache_zookeeper_failures_total 0
|
||||
# EOF
|
||||
@ -2495,6 +2495,11 @@ func (ev *evaluator) matrixIterSlice(
}
}

if mint == maxt {
// Empty range: return the empty slices.
return floats, histograms
}

soughtValueType := it.Seek(maxt)
if soughtValueType == chunkenc.ValNone {
if it.Err() != nil {

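The hunk above short-circuits matrixIterSlice when the selected interval is empty. A minimal standalone sketch of the same early-return shape, using a plain int64 slice instead of the real float/histogram buffers (sliceRange and its arguments are illustrative names, not Prometheus API):

package main

import "fmt"

// sliceRange mirrors the early return added above: when the selected
// interval (mint, maxt] is empty because mint == maxt, hand back the empty
// result without touching the data at all.
func sliceRange(points []int64, mint, maxt int64) []int64 {
	var out []int64
	if mint == maxt {
		// Empty range: nothing can fall into (mint, maxt].
		return out
	}
	for _, ts := range points {
		if ts > mint && ts <= maxt {
			out = append(out, ts)
		}
	}
	return out
}

func main() {
	fmt.Println(sliceRange([]int64{1, 2, 3}, 2, 2)) // []
	fmt.Println(sliceRange([]int64{1, 2, 3}, 1, 3)) // [2 3]
}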
@ -1900,6 +1900,15 @@ func TestSubquerySelector(t *testing.T) {
},
Start: time.Unix(35, 0),
},
{
Query: "metric[0:10s]",
Result: promql.Result{
nil,
promql.Matrix{},
nil,
},
Start: time.Unix(10, 0),
},
},
},
{
@ -3199,6 +3208,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
load 1m
some_metric{env="1"} 0+1x4
some_metric{env="2"} 0+2x4
some_metric{env="3"} {{count:0}}+{{count:1}}x4
some_metric_with_stale_marker 0 1 stale 3
`)
t.Cleanup(func() { require.NoError(t, storage.Close()) })
@ -3226,6 +3236,13 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4},
},
},
{
Metric: labels.FromStrings("__name__", "some_metric", "env", "3"),
Histograms: []promql.HPoint{
{T: timestamp.FromTime(baseT.Add(time.Minute)), H: &histogram.FloatHistogram{Count: 1, CounterResetHint: histogram.NotCounterReset}},
{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), H: &histogram.FloatHistogram{Count: 2, CounterResetHint: histogram.NotCounterReset}},
},
},
},
},
"matches no series": {
@ -3251,6 +3268,11 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
},
},
},
"matches series but range is 0": {
expr: "some_metric[0]",
ts: baseT.Add(2 * time.Minute),
expected: promql.Matrix{},
},
}

for name, testCase := range testCases {

@ -187,35 +187,48 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
// not a histogram, and a warning wrapped in an annotation in that case.
// Otherwise, it returns the calculated histogram and an empty annotation.
func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
prev := points[0].H
usingCustomBuckets := prev.UsesCustomBuckets()
last := points[len(points)-1].H
var (
prev = points[0].H
usingCustomBuckets = prev.UsesCustomBuckets()
last = points[len(points)-1].H
annos annotations.Annotations
)

if last == nil {
return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
return nil, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
}

minSchema := prev.Schema
if last.Schema < minSchema {
minSchema = last.Schema
// We check for gauge type histograms in the loop below, but the loop
// below does not run on the first and last point, so check the first
// and last point now.
if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
}

// Null out the 1st sample if there is a counter reset between the 1st
// and 2nd. In this case, we want to ignore any incompatibility in the
// bucket layout of the 1st sample because we do not need to look at it.
if isCounter && len(points) > 1 {
second := points[1].H
if second != nil && second.DetectReset(prev) {
prev = &histogram.FloatHistogram{}
prev.Schema = second.Schema
prev.CustomValues = second.CustomValues
usingCustomBuckets = second.UsesCustomBuckets()
}
}

if last.UsesCustomBuckets() != usingCustomBuckets {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}

var annos annotations.Annotations

// We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point,
// so check the first and last point now.
if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
return nil, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}

// First iteration to find out two things:
// - What's the smallest relevant schema?
// - Are all data points histograms?
// TODO(beorn7): Find a way to check that earlier, e.g. by handing in a
// []FloatPoint and a []HistogramPoint separately.
minSchema := prev.Schema
if last.Schema < minSchema {
minSchema = last.Schema
}
for _, currPoint := range points[1 : len(points)-1] {
curr := currPoint.H
if curr == nil {
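The reordering in this hunk collects warnings in a single annos value before any early return, so a query that also hits an incompatible bucket layout still reports the "not a counter" warning. A minimal standalone sketch of that accumulation pattern, using a slice stand-in rather than the real annotations.Annotations type (rateSketch, warnings, and the message strings here are illustrative, not the Prometheus implementation):

package main

import "fmt"

// warnings is a simplified stand-in for annotations.Annotations.
type warnings []string

func (w warnings) add(msg string) warnings { return append(w, msg) }

// rateSketch mirrors the reordered control flow of histogramRate: the
// "not a counter" warning is recorded before any early return, so an
// incompatible bucket layout still reports both issues.
func rateSketch(isGauge, mixedLayout bool) (ok bool, w warnings) {
	if isGauge {
		w = w.add("this native histogram metric is not a counter")
	}
	if mixedLayout {
		// Early return, but the warning collected above is kept.
		return false, w.add("mixed exponential and custom bucket schemas")
	}
	return true, w
}

func main() {
	ok, w := rateSketch(true, true)
	fmt.Println(ok, w)
}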
@ -473,11 +486,22 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions
return append(enh.Out, Sample{F: s1}), nil
}

// filterFloats filters out histogram samples from the vector in-place.
func filterFloats(v Vector) Vector {
floats := v[:0]
for _, s := range v {
if s.H == nil {
floats = append(floats, s)
}
}
return floats
}

// === sort(node parser.ValueTypeVector) (Vector, Annotations) ===
func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
// NaN should sort to the bottom, so take descending sort with NaN first and
// reverse it.
byValueSorter := vectorByReverseValueHeap(vals[0].(Vector))
byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector)))
sort.Sort(sort.Reverse(byValueSorter))
return Vector(byValueSorter), nil
}
@ -486,7 +510,7 @@ func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
// NaN should sort to the bottom, so take ascending sort with NaN first and
// reverse it.
byValueSorter := vectorByValueHeap(vals[0].(Vector))
byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector)))
sort.Sort(sort.Reverse(byValueSorter))
return Vector(byValueSorter), nil
}
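The new filterFloats helper uses the v[:0] idiom to drop histogram samples without allocating a second slice. A small standalone sketch of the same idiom, with a simplified sample type standing in for promql.Sample (the type and function names below are illustrative assumptions):

package main

import "fmt"

// sample is a simplified stand-in for promql.Sample: F is the float value
// and hist marks a native-histogram sample that sort()/sort_desc() now skip.
type sample struct {
	F    float64
	hist bool
}

// filterFloatsSketch drops histogram samples in place, reusing the backing
// array of v via the v[:0] idiom, just like the filterFloats helper above.
func filterFloatsSketch(v []sample) []sample {
	floats := v[:0]
	for _, s := range v {
		if !s.hist {
			floats = append(floats, s)
		}
	}
	return floats
}

func main() {
	v := []sample{{F: 2}, {F: 1, hist: true}, {F: 3}}
	fmt.Println(filterFloatsSketch(v)) // [{2 false} {3 false}]
}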
@ -618,11 +642,27 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper

// === Scalar(node parser.ValueTypeVector) Scalar ===
func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
v := vals[0].(Vector)
if len(v) != 1 {
var (
v = vals[0].(Vector)
value float64
found bool
)

for _, s := range v {
if s.H == nil {
if found {
// More than one float found, return NaN.
return append(enh.Out, Sample{F: math.NaN()}), nil
}
found = true
value = s.F
}
}
// Return the single float if found, otherwise return NaN.
if !found {
return append(enh.Out, Sample{F: math.NaN()}), nil
}
return append(enh.Out, Sample{F: v[0].F}), nil
return append(enh.Out, Sample{F: value}), nil
}

func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector {
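With this hunk, scalar() ignores histogram samples and returns the float value only when exactly one float is present, NaN otherwise. A standalone sketch of the same decision logic using plain values, where nil pointers stand in for histogram samples (scalarSketch is an illustrative name, not the real funcScalar):

package main

import (
	"fmt"
	"math"
)

// scalarSketch mirrors the new funcScalar behaviour: nil entries stand in
// for histogram samples and are ignored; exactly one float yields that
// float, anything else yields NaN.
func scalarSketch(samples []*float64) float64 {
	var (
		value float64
		found bool
	)
	for _, s := range samples {
		if s == nil {
			continue // histogram sample, ignored
		}
		if found {
			return math.NaN() // more than one float
		}
		found = true
		value = *s
	}
	if !found {
		return math.NaN() // no float at all
	}
	return value
}

func main() {
	one := 1.0
	fmt.Println(scalarSketch([]*float64{&one, nil})) // 1
	fmt.Println(scalarSketch([]*float64{nil}))       // NaN
}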
@ -1612,7 +1652,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio
if err != nil {
panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
}
if !model.LabelNameRE.MatchString(dst) {
if !model.LabelName(dst).IsValid() {
panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
}

@ -1689,6 +1729,9 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions)
matrix[i].DropName = el.DropName
}
}
if matrix.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
}

return matrix, ws
}
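The new ContainsSameLabelset guard rejects label_join results where two output series collapse onto an identical label set. A minimal sketch of such a duplicate check, keyed on a rendered label string rather than the real labels.Labels hashing (key, containsSameLabelset, and the map-based series representation are illustrative assumptions):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// key renders a label set into a canonical string so it can be used as a
// map key; the real code hashes labels.Labels instead.
func key(lbls map[string]string) string {
	names := make([]string, 0, len(lbls))
	for n := range lbls {
		names = append(names, n)
	}
	sort.Strings(names)
	parts := make([]string, 0, len(names))
	for _, n := range names {
		parts = append(parts, n+"="+lbls[n])
	}
	return strings.Join(parts, ",")
}

// containsSameLabelset reports whether any two series share an identical
// label set, which is what the new guard in evalLabelJoin rejects.
func containsSameLabelset(series []map[string]string) bool {
	seen := make(map[string]struct{}, len(series))
	for _, lbls := range series {
		k := key(lbls)
		if _, ok := seen[k]; ok {
			return true
		}
		seen[k] = struct{}{}
	}
	return false
}

func main() {
	fmt.Println(containsSameLabelset([]map[string]string{
		{"label": "a", "this": "a"},
		{"label": "a", "this": "a"}, // collides after a destructive label_join
	})) // true
}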
@ -1880,16 +1923,7 @@ func (s vectorByValueHeap) Len() int {
}

func (s vectorByValueHeap) Less(i, j int) bool {
// We compare histograms based on their sum of observations.
// TODO(beorn7): Is that what we want?
vi, vj := s[i].F, s[j].F
if s[i].H != nil {
vi = s[i].H.Sum
}
if s[j].H != nil {
vj = s[j].H.Sum
}

if math.IsNaN(vi) {
return true
}
@ -1919,16 +1953,7 @@ func (s vectorByReverseValueHeap) Len() int {
}

func (s vectorByReverseValueHeap) Less(i, j int) bool {
// We compare histograms based on their sum of observations.
// TODO(beorn7): Is that what we want?
vi, vj := s[i].F, s[j].F
if s[i].H != nil {
vi = s[i].H.Sum
}
if s[j].H != nil {
vj = s[j].H.Sum
}

if math.IsNaN(vi) {
return true
}

27
promql/promqltest/testdata/functions.test
vendored
27
promql/promqltest/testdata/functions.test
vendored
@ -466,7 +466,7 @@ eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*")
|
||||
eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*")
|
||||
|
||||
# label_replace fails when the destination label name is not a valid Prometheus label name.
|
||||
eval_fail instant at 0m label_replace(testmetric, "invalid-label-name", "", "src", "(.*)")
|
||||
eval_fail instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)")
|
||||
|
||||
# label_replace fails when there would be duplicated identical output label sets.
|
||||
eval_fail instant at 0m label_replace(testmetric, "src", "", "", "")
|
||||
@ -499,6 +499,8 @@ eval instant at 20s timestamp(metric)
load 5m
testmetric{src="a",src1="b",src2="c",dst="original-destination-value"} 0
testmetric{src="d",src1="e",src2="f",dst="original-destination-value"} 1
dup{label="a", this="a"} 1.0
dup{label="b", this="a"} 1.0

# label_join joins all src values in order.
eval instant at 0m label_join(testmetric, "dst", "-", "src", "src1", "src2")
@ -530,6 +532,9 @@ eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2")
testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0
testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1

eval_fail instant at 0m label_join(dup, "label", "", "this")
expected_fail_message vector cannot contain metrics with the same labelset

clear

# Tests for vector.
@ -637,6 +642,7 @@ load 5m
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10
http_requests{job="app-server", instance="1", group="canary"} 0+80x10
http_requests{job="app-server", instance="2", group="canary"} {{schema:0 sum:1 count:1}}x15

eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
@ -1647,3 +1653,22 @@ load 1m

eval range from 0 to 5m step 1m round(mixed_metric)
{} _ 1 2 3

# Test scalar() with histograms.
load 1m
metric{type="float", l="x"} 1
metric{type="float", l="y"} 2
metric{type="histogram", l="x"} {{schema:0 sum:1 count:1}}
|
||||
metric{type="histogram", l="x"} {{schema:0 sum:1 count:1}}
|
||||

# Two floats in the vector.
eval instant at 0m scalar(metric)
NaN

# No floats in the vector.
eval instant at 0m scalar({type="histogram"})
NaN

# One float in the vector.
eval instant at 0m scalar({l="x"})
1

@ -1013,7 +1013,7 @@ eval instant at 5m sum(custom_buckets_histogram)
|
||||
|
||||
clear
|
||||
|
||||
# Test 'this native histogram metric is not a gauge' warning for rate
|
||||
# Test 'this native histogram metric is not a counter' warning for rate
|
||||
load 30s
|
||||
some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}
|
||||
|
||||
@ -1022,7 +1022,7 @@ eval_warn instant at 30s rate(some_metric[1m])
|
||||
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
|
||||
|
||||
# Test the case where we have more than two points for rate
|
||||
eval_warn instant at 1m rate(some_metric[1m])
|
||||
eval_warn instant at 1m rate(some_metric[1m30s])
|
||||
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
|
||||
|
||||
clear
|
||||
@ -1032,20 +1032,20 @@ load 30s
|
||||
some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
# Start and end with exponential, with custom in the middle.
|
||||
eval_warn instant at 1m rate(some_metric[1m])
|
||||
eval_warn instant at 1m rate(some_metric[1m30s])
|
||||
# Should produce no results.
|
||||
|
||||
# Start and end with custom, with exponential in the middle.
|
||||
eval_warn instant at 1m30s rate(some_metric[1m])
|
||||
eval_warn instant at 1m30s rate(some_metric[1m30s])
|
||||
# Should produce no results.
|
||||
|
||||
# Start with custom, end with exponential.
|
||||
eval_warn instant at 1m rate(some_metric[1m])
|
||||
# Should produce no results.
|
||||
# Start with custom, end with exponential. Return the exponential histogram divided by 30.
|
||||
eval instant at 1m rate(some_metric[1m])
|
||||
{} {{schema:0 sum:0.16666666666666666 count:0.13333333333333333 buckets:[0.03333333333333333 0.06666666666666667 0.03333333333333333]}}
|
||||
|
||||
# Start with exponential, end with custom.
|
||||
eval_warn instant at 30s rate(some_metric[1m])
|
||||
# Should produce no results.
|
||||
# Start with exponential, end with custom. Return the custom buckets histogram divided by 30.
|
||||
eval instant at 30s rate(some_metric[1m])
|
||||
{} {{schema:-53 sum:0.03333333333333333 count:0.03333333333333333 custom_values:[5 10] buckets:[0.03333333333333333]}}
|
||||
|
||||
clear
|
||||
|
||||
@ -1179,7 +1179,10 @@ eval_info range from 0 to 6m step 6m metric2 > metric2
|
||||
clear
|
||||
|
||||
load 6m
|
||||
nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
# If evaluating at 12m, the first two NHCBs have the same custom values
|
||||
# while the 3rd one has different ones.
|
||||
|
||||
eval_warn instant at 12m sum_over_time(nhcb_metric[13m])
|
||||
|
||||
@ -1206,6 +1209,38 @@ eval_warn instant at 12m rate(nhcb_metric[13m])
|
||||
eval instant at 12m resets(nhcb_metric[13m])
|
||||
{} 1
|
||||
|
||||
# Now doing the same again, but at 18m, where the first NHCB has
|
||||
# different custom_values compared to the other two. This now
|
||||
# works with no warning for increase() and rate(). No change
|
||||
# otherwise.
|
||||
|
||||
eval_warn instant at 18m sum_over_time(nhcb_metric[13m])
|
||||
|
||||
eval_warn instant at 18m avg_over_time(nhcb_metric[13m])
|
||||
|
||||
eval instant at 18m last_over_time(nhcb_metric[13m])
|
||||
nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
eval instant at 18m count_over_time(nhcb_metric[13m])
|
||||
{} 3
|
||||
|
||||
eval instant at 18m present_over_time(nhcb_metric[13m])
|
||||
{} 1
|
||||
|
||||
eval instant at 18m changes(nhcb_metric[13m])
|
||||
{} 1
|
||||
|
||||
eval_warn instant at 18m delta(nhcb_metric[13m])
|
||||
|
||||
eval instant at 18m increase(nhcb_metric[13m])
|
||||
{} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}}
|
||||
|
||||
eval instant at 18m rate(nhcb_metric[13m])
|
||||
{} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}}
|
||||
|
||||
eval instant at 18m resets(nhcb_metric[13m])
|
||||
{} 1
|
||||
|
||||
clear
|
||||
|
||||
load 1m
|
||||
|
||||
@ -25,6 +25,8 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -1386,8 +1388,17 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
|
||||
require.Equal(t, 1, seriesAdded)
|
||||
}
|
||||
|
||||
func makeTestMetrics(n int) []byte {
|
||||
// Construct a metrics string to parse
|
||||
func readTextParseTestMetrics(t testing.TB) []byte {
|
||||
t.Helper()
|
||||
|
||||
b, err := os.ReadFile("../model/textparse/testdata/alltypes.237mfs.prom.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func makeTestGauges(n int) []byte {
|
||||
sb := bytes.Buffer{}
|
||||
fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
|
||||
fmt.Fprintf(&sb, "# HELP metric_a help text\n")
|
||||
@ -1401,59 +1412,111 @@ func makeTestMetrics(n int) []byte {
|
||||
func promTextToProto(tb testing.TB, text []byte) []byte {
|
||||
tb.Helper()
|
||||
|
||||
d := expfmt.NewDecoder(bytes.NewReader(text), expfmt.TextVersion)
|
||||
|
||||
pb := &dto.MetricFamily{}
|
||||
if err := d.Decode(pb); err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
o, err := proto.Marshal(pb)
|
||||
var p expfmt.TextParser
|
||||
fams, err := p.TextToMetricFamilies(bytes.NewReader(text))
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
// Order by name for the deterministic tests.
|
||||
var names []string
|
||||
for n := range fams {
|
||||
names = append(names, n)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
// Write first length, then binary protobuf.
|
||||
varintBuf := binary.AppendUvarint(nil, uint64(len(o)))
|
||||
buf.Write(varintBuf)
|
||||
buf.Write(o)
|
||||
for _, n := range names {
|
||||
o, err := proto.Marshal(fams[n])
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
|
||||
// Write first length, then binary protobuf.
|
||||
varintBuf := binary.AppendUvarint(nil, uint64(len(o)))
|
||||
buf.Write(varintBuf)
|
||||
buf.Write(o)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func TestPromTextToProto(t *testing.T) {
|
||||
metricsText := readTextParseTestMetrics(t)
|
||||
// TODO(bwplotka): Windows adds \r for new lines which is
|
||||
// not handled correctly in the expfmt parser, fix it.
|
||||
metricsText = bytes.ReplaceAll(metricsText, []byte("\r"), nil)
|
||||
|
||||
metricsProto := promTextToProto(t, metricsText)
|
||||
d := expfmt.NewDecoder(bytes.NewReader(metricsProto), expfmt.NewFormat(expfmt.TypeProtoDelim))
|
||||
|
||||
var got []string
|
||||
for {
|
||||
mf := &dto.MetricFamily{}
|
||||
if err := d.Decode(mf); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
got = append(got, mf.GetName())
|
||||
}
|
||||
require.Len(t, got, 237)
|
||||
// Check a few to see if those are not dups.
|
||||
require.Equal(t, "go_gc_cycles_automatic_gc_cycles_total", got[0])
|
||||
require.Equal(t, "prometheus_sd_kuma_fetch_duration_seconds", got[128])
|
||||
require.Equal(t, "promhttp_metric_handler_requests_total", got[236])
|
||||
}
|
||||
|
||||
// BenchmarkScrapeLoopAppend benchmarks a core append function in a scrapeLoop
|
||||
// that creates a new parser and goes through a byte slice from a single scrape.
|
||||
// Benchmark compares append function run across 2 dimensions:
|
||||
// *`data`: different sizes of metrics scraped e.g. one big gauge metric family
|
||||
// with a thousand series and more realistic scenario with common types.
|
||||
// *`fmt`: different scrape formats which will benchmark different parsers e.g.
|
||||
// promtext, omtext and promproto.
|
||||
//
|
||||
// Recommended CLI invocation:
|
||||
/*
|
||||
export bench=scrape-loop-v1 && go test \
|
||||
export bench=append-v1 && go test ./scrape/... \
|
||||
-run '^$' -bench '^BenchmarkScrapeLoopAppend' \
|
||||
-benchtime 5s -count 6 -cpu 2 -timeout 999m \
|
||||
| tee ${bench}.txt
|
||||
*/
|
||||
func BenchmarkScrapeLoopAppend(b *testing.B) {
|
||||
metricsText := makeTestMetrics(100)
|
||||
|
||||
// Create proto representation.
|
||||
metricsProto := promTextToProto(b, metricsText)
|
||||
|
||||
for _, bcase := range []struct {
|
||||
name string
|
||||
contentType string
|
||||
parsable []byte
|
||||
for _, data := range []struct {
|
||||
name string
|
||||
parsableText []byte
|
||||
}{
|
||||
{name: "PromText", contentType: "text/plain", parsable: metricsText},
|
||||
{name: "OMText", contentType: "application/openmetrics-text", parsable: metricsText},
|
||||
{name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
|
||||
{name: "1Fam1000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto.
|
||||
{name: "237FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~185.7 KB, ~70.6 KB in proto.
|
||||
} {
|
||||
b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
|
||||
ctx, sl := simpleTestScrapeLoop(b)
|
||||
b.Run(fmt.Sprintf("data=%v", data.name), func(b *testing.B) {
|
||||
metricsProto := promTextToProto(b, data.parsableText)
|
||||
|
||||
slApp := sl.appender(ctx)
|
||||
ts := time.Time{}
|
||||
for _, bcase := range []struct {
|
||||
name string
|
||||
contentType string
|
||||
parsable []byte
|
||||
}{
|
||||
{name: "PromText", contentType: "text/plain", parsable: data.parsableText},
|
||||
{name: "OMText", contentType: "application/openmetrics-text", parsable: data.parsableText},
|
||||
{name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
|
||||
} {
|
||||
b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
|
||||
ctx, sl := simpleTestScrapeLoop(b)
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ts = ts.Add(time.Second)
|
||||
_, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
slApp := sl.appender(ctx)
|
||||
ts := time.Time{}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ts = ts.Add(time.Second)
|
||||
_, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -4474,7 +4537,7 @@ func TestScrapeLoopCompression(t *testing.T) {
|
||||
simpleStorage := teststorage.New(t)
|
||||
defer simpleStorage.Close()
|
||||
|
||||
metricsText := makeTestMetrics(10)
|
||||
metricsText := makeTestGauges(10)
|
||||
|
||||
for _, tc := range []struct {
|
||||
enableCompression bool
|
||||
|
||||
@ -26,14 +26,14 @@ jobs:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
|
||||
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
|
||||
with:
|
||||
go-version: 1.23.x
|
||||
- name: Install snmp_exporter/generator dependencies
|
||||
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
|
||||
if: github.repository == 'prometheus/snmp_exporter'
|
||||
- name: Lint
|
||||
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
|
||||
uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0
|
||||
with:
|
||||
args: --verbose
|
||||
version: v1.63.4
|
||||
|
||||
25
tsdb/db.go
25
tsdb/db.go
@ -992,9 +992,14 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
|
||||
db.metrics.maxBytes.Set(float64(maxBytes))
|
||||
db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds())
|
||||
|
||||
// Calling db.reload() calls db.reloadBlocks() which requires cmtx to be locked.
|
||||
db.cmtx.Lock()
|
||||
if err := db.reload(); err != nil {
|
||||
db.cmtx.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
db.cmtx.Unlock()
|
||||
|
||||
// Set the min valid time for the ingested samples
|
||||
// to be no lower than the maxt of the last block.
|
||||
minValidTime := int64(math.MinInt64)
|
||||
@ -1363,6 +1368,7 @@ func (db *DB) CompactOOOHead(ctx context.Context) error {
|
||||
// Callback for testing.
|
||||
var compactOOOHeadTestingCallback func()
|
||||
|
||||
// The db.cmtx mutex should be held before calling this method.
|
||||
func (db *DB) compactOOOHead(ctx context.Context) error {
|
||||
if !db.oooWasEnabled.Load() {
|
||||
return nil
|
||||
@ -1417,6 +1423,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
|
||||
|
||||
// compactOOO creates a new block per possible block range in the compactor's directory from the OOO Head given.
|
||||
// Each ULID in the result corresponds to a block in a unique time range.
|
||||
// The db.cmtx mutex should be held before calling this method.
|
||||
func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID, err error) {
|
||||
start := time.Now()
|
||||
|
||||
@ -1461,7 +1468,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
|
||||
}
|
||||
|
||||
// compactHead compacts the given RangeHead.
|
||||
// The compaction mutex should be held before calling this method.
|
||||
// The db.cmtx should be held before calling this method.
|
||||
func (db *DB) compactHead(head *RangeHead) error {
|
||||
uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
|
||||
if err != nil {
|
||||
@ -1487,7 +1494,7 @@ func (db *DB) compactHead(head *RangeHead) error {
|
||||
}
|
||||
|
||||
// compactBlocks compacts all the eligible on-disk blocks.
|
||||
// The compaction mutex should be held before calling this method.
|
||||
// The db.cmtx should be held before calling this method.
|
||||
func (db *DB) compactBlocks() (err error) {
|
||||
// Check for compactions of multiple blocks.
|
||||
for {
|
||||
@ -1544,6 +1551,7 @@ func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) {
|
||||
}
|
||||
|
||||
// reload reloads blocks and truncates the head and its WAL.
|
||||
// The db.cmtx mutex should be held before calling this method.
|
||||
func (db *DB) reload() error {
|
||||
if err := db.reloadBlocks(); err != nil {
|
||||
return fmt.Errorf("reloadBlocks: %w", err)
|
||||
@ -1560,6 +1568,7 @@ func (db *DB) reload() error {
|
||||
|
||||
// reloadBlocks reloads blocks without touching head.
|
||||
// Blocks that are obsolete due to replacement or retention will be deleted.
|
||||
// The db.cmtx mutex should be held before calling this method.
|
||||
func (db *DB) reloadBlocks() (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
@ -1568,13 +1577,9 @@ func (db *DB) reloadBlocks() (err error) {
|
||||
db.metrics.reloads.Inc()
|
||||
}()
|
||||
|
||||
// Now that we reload TSDB every minute, there is a high chance for a race condition with a reload
|
||||
// triggered by CleanTombstones(). We need to lock the reload to avoid the situation where
|
||||
// a normal reload and CleanTombstones try to delete the same block.
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.mtx.RLock()
|
||||
loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool, db.opts.PostingsDecoderFactory)
|
||||
db.mtx.RUnlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1600,11 +1605,13 @@ func (db *DB) reloadBlocks() (err error) {
    if len(corrupted) > 0 {
        // Corrupted but no child loaded for it.
        // Close all new blocks to release the lock for windows.
        db.mtx.RLock()
        for _, block := range loadable {
            if _, open := getBlock(db.blocks, block.Meta().ULID); !open {
                block.Close()
            }
        }
        db.mtx.RUnlock()
        errs := tsdb_errors.NewMulti()
        for ulid, err := range corrupted {
            if err != nil {
@ -1643,8 +1650,10 @@ func (db *DB) reloadBlocks() (err error) {
    })

    // Swap new blocks first for subsequently created readers to be seen.
    db.mtx.Lock()
    oldBlocks := db.blocks
    db.blocks = toLoad
    db.mtx.Unlock()

    // Only check overlapping blocks when overlapping compaction is enabled.
    if db.opts.EnableOverlappingCompaction {

@ -1352,61 +1352,6 @@ func TestTombstoneCleanFail(t *testing.T) {
    require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1)
}

// TestTombstoneCleanRetentionLimitsRace tests that a CleanTombstones operation
// and retention limit policies, when triggered at the same time,
// won't race against each other.
func TestTombstoneCleanRetentionLimitsRace(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode.")
    }

    opts := DefaultOptions()
    var wg sync.WaitGroup

    // We want to make sure that a race doesn't happen when a normal reload and a CleanTombstones()
    // reload try to delete the same block. Without the correct lock placement, it can happen if a
    // block is marked for deletion due to retention limits and also has tombstones to be cleaned at
    // the same time.
    //
    // That is something tricky to trigger, so let's try several times just to make sure.
    for i := 0; i < 20; i++ {
        t.Run(fmt.Sprintf("iteration%d", i), func(t *testing.T) {
            db := openTestDB(t, opts, nil)
            totalBlocks := 20
            dbDir := db.Dir()
            // Generate some blocks with old mint (near epoch).
            for j := 0; j < totalBlocks; j++ {
                blockDir := createBlock(t, dbDir, genSeries(10, 1, int64(j), int64(j)+1))
                block, err := OpenBlock(nil, blockDir, nil, nil)
                require.NoError(t, err)
                // Cover block with tombstones so it can be deleted with CleanTombstones() as well.
                tomb := tombstones.NewMemTombstones()
                tomb.AddInterval(0, tombstones.Interval{Mint: int64(j), Maxt: int64(j) + 1})
                block.tombstones = tomb

                db.blocks = append(db.blocks, block)
            }

            wg.Add(2)
            // Run reload and CleanTombstones together, with a small time window randomization
            go func() {
                defer wg.Done()
                time.Sleep(time.Duration(rand.Float64() * 100 * float64(time.Millisecond)))
                require.NoError(t, db.reloadBlocks())
            }()
            go func() {
                defer wg.Done()
                time.Sleep(time.Duration(rand.Float64() * 100 * float64(time.Millisecond)))
                require.NoError(t, db.CleanTombstones())
            }()

            wg.Wait()

            require.NoError(t, db.Close())
        })
    }
}

func intersection(oldBlocks, actualBlocks []string) (intersection []string) {
    hash := make(map[string]bool)
    for _, e := range oldBlocks {

@ -117,15 +117,19 @@ func (h *headIndexReader) PostingsForAllLabelValues(ctx context.Context, name st
func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
    series := make([]*memSeries, 0, 128)

    notFoundSeriesCount := 0
    // Fetch all the series only once.
    for p.Next() {
        s := h.head.series.getByID(chunks.HeadSeriesRef(p.At()))
        if s == nil {
            h.head.logger.Debug("Looked up series not found")
            notFoundSeriesCount++
        } else {
            series = append(series, s)
        }
    }
    if notFoundSeriesCount > 0 {
        h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount)
    }
    if err := p.Err(); err != nil {
        return index.ErrPostings(fmt.Errorf("expand postings: %w", err))
    }
@ -150,11 +154,12 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou
    }

    out := make([]storage.SeriesRef, 0, 128)
    notFoundSeriesCount := 0

    for p.Next() {
        s := h.head.series.getByID(chunks.HeadSeriesRef(p.At()))
        if s == nil {
            h.head.logger.Debug("Looked up series not found")
            notFoundSeriesCount++
            continue
        }

@ -165,6 +170,9 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou

        out = append(out, storage.SeriesRef(s.ref))
    }
    if notFoundSeriesCount > 0 {
        h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount)
    }

    return index.NewListPostings(out)
}

@ -56,8 +56,13 @@ func (c *compressedResponseWriter) Close() {

// Constructs a new compressedResponseWriter based on client request headers.
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter {
    encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",")
    for _, encoding := range encodings {
    raw := req.Header.Get(acceptEncodingHeader)
    var (
        encoding   string
        commaFound bool
    )
    for {
        encoding, raw, commaFound = strings.Cut(raw, ",")
        switch strings.TrimSpace(encoding) {
        case gzipEncoding:
            writer.Header().Set(contentEncodingHeader, gzipEncoding)
@ -72,6 +77,9 @@ func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request)
                writer: zlib.NewWriter(writer),
            }
        }
        if !commaFound {
            break
        }
    }
    return &compressedResponseWriter{
        ResponseWriter: writer,

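Note: the rewrite above replaces strings.Split with strings.Cut so that an Accept-Encoding value made of nothing but commas no longer forces a slice allocation proportional to the header size before any encoding is inspected. A standalone sketch of the difference, using generic names that are not part of the repository:

package main

import (
    "fmt"
    "strings"
)

// splitEncodings allocates one slice entry per comma up front.
func splitEncodings(v string) int {
    n := 0
    for _, enc := range strings.Split(v, ",") {
        if strings.TrimSpace(enc) != "" {
            n++
        }
    }
    return n
}

// cutEncodings walks the same value in place, with no per-call slice.
func cutEncodings(v string) int {
    n := 0
    for {
        var (
            enc   string
            found bool
        )
        enc, v, found = strings.Cut(v, ",")
        if strings.TrimSpace(enc) != "" {
            n++
        }
        if !found {
            break
        }
    }
    return n
}

func main() {
    header := "gzip, deflate, br"
    fmt.Println(splitEncodings(header), cutEncodings(header)) // 3 3
}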
@ -18,6 +18,7 @@ import (
    "io"
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"

    "github.com/klauspost/compress/gzip"
@ -72,6 +73,17 @@ func TestCompressionHandler_PlainText(t *testing.T) {
    require.Equal(t, expected, actual, "expected response with content")
}

func BenchmarkNewCompressionHandler_MaliciousAcceptEncoding(b *testing.B) {
    rec := httptest.NewRecorder()
    req := httptest.NewRequest(http.MethodGet, "/whatever", nil)
    req.Header.Set("Accept-Encoding", strings.Repeat(",", http.DefaultMaxHeaderBytes))
    b.ReportAllocs()
    b.ResetTimer()
    for range b.N {
        newCompressedResponseWriter(rec, req)
    }
}

func TestCompressionHandler_Gzip(t *testing.T) {
    tearDown := setup()
    defer tearDown()

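The benchmark above can be exercised on its own with go test's -bench and -benchmem flags; the package path shown here is an assumption and may need adjusting:

go test -run '^$' -bench 'MaliciousAcceptEncoding' -benchmem ./util/httputil/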
@ -332,6 +332,7 @@ export const getUPlotOptions = (
  axes: [
    // X axis (time).
    {
      space: 80,
      labelSize: 20,
      stroke: light ? "#333" : "#eee",
      ticks: {
@ -343,6 +344,33 @@ export const getUPlotOptions = (
        width: 2,
        dash: [],
      },
      values: [
        // See https://github.com/leeoniya/uPlot/tree/master/docs#axis--grid-opts and https://github.com/leeoniya/uPlot/issues/83.
        //
        // We want to achieve 24h-based time formatting instead of the default AM/PM-based time formatting.
        // We also want to render dates in an unambiguous format that uses the abbreviated month name instead of a US-centric MM/DD/YYYY format.
        //
        // The "tick incr" column defines the breakpoint in seconds at which the format changes.
        // The "default" column defines the default format for a tick at this breakpoint.
        // The "year"/"month"/"day"/"hour"/"min"/"sec" columns define additional values to display for year/month/day/... rollovers occurring around a tick.
        // The "mode" column value "1" means that rollover values will be concatenated with the default format (instead of replacing it).
        //
        // tick incr default year month day hour min sec mode
        // prettier-ignore
        [3600 * 24 * 365, "{YYYY}", null, null, null, null, null, null, 1],
        // prettier-ignore
        [3600 * 24 * 28, "{MMM}", "\n{YYYY}", null, null, null, null, null, 1],
        // prettier-ignore
        [3600 * 24, "{MMM} {D}", "\n{YYYY}", null, null, null, null, null, 1],
        // prettier-ignore
        [3600, "{HH}:{mm}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1],
        // prettier-ignore
        [60, "{HH}:{mm}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1],
        // prettier-ignore
        [1, "{HH}:{mm}:{ss}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1],
        // prettier-ignore
        [0.001, "{HH}:{mm}:{ss}.{fff}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1],
      ],
    },
    // Y axis (sample value).
    {
@ -382,7 +410,10 @@ export const getUPlotOptions = (
      (self: uPlot) => {
        // Disallow sub-second zoom as this causes inconsistencies in the X axis in uPlot.
        const leftVal = self.posToVal(self.select.left, "x");
        const rightVal = Math.max(self.posToVal(self.select.left + self.select.width, "x"), leftVal + 1);
        const rightVal = Math.max(
          self.posToVal(self.select.left + self.select.width, "x"),
          leftVal + 1
        );

        onSelectRange(leftVal, rightVal);
      },